diff --git a/README.md b/README.md index ebb3e2d..153652e 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@

SOTA SWE

-**SotaSWE is a VSCode extension built atop [the leading agentic framework](https://github.com/codestoryai/sidecar) on SWE-bench Lite.** +**SotaSWE is a VSCode extension built atop [the leading agentic framework](https://github.com/codestoryai/sidecar) on SWE-bench Lite. This is a working experimental repository for Memory LLM Agents using Pydantic to simulate human learning and thought and for aider using pydantic and custom tool support. I also plan on creating a Frontend using Roo-Cline and combining its features with the SOTA SWE features as well as the above. This build may be unstable so use with caution.** ![Latest release](https://img.shields.io/github/v/release/codestoryai/extension?label=version) ![Discord Shield](https://discord.com/api/guilds/1138070673756004464/widget.png?style=shield) @@ -32,10 +32,47 @@ There are many ways in which you can participate in this project, for example: If you are interested in fixing issues and contributing directly to the code base, +### System Requirements + +The extension requires the following system dependencies: +- libssl3 (OpenSSL 3.0) +- Python 3.9 or newer +- pydantic v2 + +These dependencies will be automatically installed when running in GitHub Codespaces. For local development: + +#### Ubuntu/Debian +```shell +# Add Ubuntu 22.04 repository for libssl3 +echo "deb http://archive.ubuntu.com/ubuntu jammy main" | sudo tee /etc/apt/sources.list.d/jammy.list +sudo apt-get update +sudo apt-get install -y libssl3 python3.9 python3-pip +python3 -m pip install --upgrade pip +python3 -m pip install -U pydantic +``` + +#### macOS +```shell +brew install openssl@3 +brew install python@3.9 +python3 -m pip install --upgrade pip +python3 -m pip install -U pydantic +``` + +#### Windows +The required OpenSSL libraries are bundled with the Windows binary. 
+```shell +# Install Python 3.9 or newer using Windows Store or from python.org +python3 -m pip install --upgrade pip +python3 -m pip install -U pydantic +``` + +### Development Setup + 1. Install dependencies ```shell - npm install +npm install ``` 2. Run the extension in development mode with hot-reload diff --git a/agentic-memory/README.md b/agentic-memory/README.md new file mode 100644 index 0000000..ee048bf --- /dev/null +++ b/agentic-memory/README.md @@ -0,0 +1,18 @@ +# Agent Memory - Can LLMs *Really* Think? + + + +*[Cognitive Architectures for Language Agents, 2024](https://arxiv.org/pdf/2309.02427)* + +LLMs are considered "stateless" in that every time you invoke an LLM call, it is like the first time it's ever seen the input being passed through. Given this quirk, multi-turn LLM agents have a unique challenge to overcome with fully understanding and navigating a vast world model which we humans do naturally. + +Being a human has a lot of advantages over a language model when executing a task. We bring our general knowledge about the world and lived experience, our understanding of prior similar task experiences and their takeaways, what we've specifically learned how to do or been taught, and then our ability to instantly contextualize and shape our approach to a task as we're working through it. In essence, we have advanced memory and the ability to learn from and apply learnings to new experiences. + +LLMs sort of have some memory, mostly their general knowledge or traits picked up from training and additional fine tuning but suffer from a lack of the other characteristics outlined prior. To compensate for this, we can model different forms of memory, recall, and learning within our agentic system design. 
Specifically, we'll create a simple RAG agent to model 4 kinds of memory: + +- **Working Memory** - Current conversation and immediate context +- **Episodic Memory** - Historical experiences and their takeaways +- **Semantic Memory** - Knowledge context and factual grounding +- **Procedural Memory** - The "rules" and "skills" for interaction + +These four memory systems provide a holistic approach to understanding and architecting a part of cognitive design into an agent application. In this notebook we'll break down each type of memory and an example approach to implementing them into a whole agent experience. diff --git a/agentic-memory/agentic_memory.py b/agentic-memory/agentic_memory.py new file mode 100644 index 0000000..2eff77f --- /dev/null +++ b/agentic-memory/agentic_memory.py @@ -0,0 +1,197 @@ +from datetime import datetime +from typing import List, Optional, Set +from pydantic import BaseModel, Field +from langchain_openai import ChatOpenAI +from langchain_core.messages import HumanMessage, SystemMessage, AIMessage +from langchain_core.prompts import ChatPromptTemplate +from langchain_core.output_parsers import JsonOutputParser +import weaviate + +# Base Message Model +class Message(BaseModel): + """Base message model for all communications""" + role: str + content: str + timestamp: datetime = Field(default_factory=datetime.now) + +# Memory Models +class WorkingMemory(BaseModel): + """Stores current conversation context and active state""" + messages: List[Message] = [] + system_prompt: str = "You are a helpful AI Assistant." 
+ semantic_context: Optional[str] = None + +class EpisodicMemory(BaseModel): + """Stores historical experiences and reflections""" + conversation: str + context_tags: List[str] + conversation_summary: str + what_worked: str + what_to_avoid: str + +class SemanticMemory(BaseModel): + """Stores factual knowledge and information""" + chunk: str + +class ProceduralMemory(BaseModel): + """Stores interaction guidelines and learned behaviors""" + guidelines: List[str] + +# Memory Tools +class MemoryTools(BaseModel): + """Tools for memory operations""" + + def format_conversation(self, messages: List[Message]) -> str: + """Format messages into a readable conversation string""" + conversation = [] + for message in messages[1:]: # Skip system message + conversation.append(f"{message.role.upper()}: {message.content}") + return "\n".join(conversation) + + def episodic_recall(self, query: str, vdb_client) -> EpisodicMemory: + """Retrieve relevant episodic memory""" + episodic_memory = vdb_client.collections.get("episodic_memory") + memory = episodic_memory.query.hybrid( + query=query, + alpha=0.5, + limit=1, + ) + props = memory.objects[0].properties + return EpisodicMemory( + conversation=props['conversation'], + context_tags=props['context_tags'], + conversation_summary=props['conversation_summary'], + what_worked=props['what_worked'], + what_to_avoid=props['what_to_avoid'] + ) + + def semantic_recall(self, query: str, vdb_client) -> str: + """Retrieve relevant semantic knowledge""" + coala_collection = vdb_client.collections.get("CoALA_Paper") + memories = coala_collection.query.hybrid( + query=query, + alpha=0.5, + limit=15, + ) + combined_text = "" + for i, memory in enumerate(memories.objects): + combined_text += f"\nCHUNK {i+1}:\n" + combined_text += memory.properties['chunk'].strip() + return combined_text + + def load_procedural_memory(self) -> ProceduralMemory: + """Load procedural memory guidelines""" + with open("./procedural_memory.txt", "r") as content: + 
guidelines = content.read().split("\n") + return ProceduralMemory(guidelines=guidelines) + +# Memory Agent +class MemoryAgent(BaseModel): + """Main agent class integrating all memory types""" + working_memory: WorkingMemory = Field(default_factory=WorkingMemory) + tools: MemoryTools = Field(default_factory=MemoryTools) + llm: ChatOpenAI = Field(default_factory=lambda: ChatOpenAI(temperature=0.7, model="gpt-4")) + vdb_client: Optional[object] = None + + def initialize(self, vdb_client): + """Initialize the agent with vector database client""" + self.vdb_client = vdb_client + + def update_system_prompt(self, query: str) -> str: + """Update system prompt with memory context""" + # Get episodic memory + episodic = self.tools.episodic_recall(query, self.vdb_client) + + # Load procedural memory + procedural = self.tools.load_procedural_memory() + + # Format system prompt + prompt = f"""You are a helpful AI Assistant. Answer the user's questions to the best of your ability. + You recall similar conversations with the user, here are the details: + + Current Conversation Match: {episodic.conversation} + What has worked well: {episodic.what_worked} + What to avoid: {episodic.what_to_avoid} + + Use these memories as context for your response to the user. + + Additionally, here are guidelines for interactions with the current user: + {' '.join(procedural.guidelines)}""" + + return prompt + + def get_semantic_context(self, query: str) -> str: + """Get relevant semantic context""" + context = self.tools.semantic_recall(query, self.vdb_client) + return f"""If needed, Use this grounded context to factually answer the next question. + Let me know if you do not have enough information or context to answer a question. 
+ + {context} + """ + + def process_message(self, user_input: str) -> str: + """Process user message and generate response""" + # Update system prompt + system_prompt = self.update_system_prompt(user_input) + system_message = SystemMessage(content=system_prompt) + + # Get semantic context + semantic_context = self.get_semantic_context(user_input) + semantic_message = HumanMessage(content=semantic_context) + + # Create user message + user_message = HumanMessage(content=user_input) + + # Update working memory + self.working_memory.messages = [ + system_message, + *[msg for msg in self.working_memory.messages if not isinstance(msg, SystemMessage)], + semantic_message, + user_message + ] + + # Generate response + response = self.llm.invoke(self.working_memory.messages) + + # Add response to working memory + self.working_memory.messages.append(response) + + return response.content + + def save_episodic_memory(self): + """Save conversation to episodic memory""" + conversation = self.tools.format_conversation(self.working_memory.messages) + + # Create reflection using LLM + reflection_prompt = ChatPromptTemplate.from_template(""" + You are analyzing conversations to create memories that will help guide future interactions. + Review the conversation and create a memory reflection following these rules: + 1. For any field where you don't have enough information, use "N/A" + 2. Be extremely concise - each string should be one clear, actionable sentence + 3. Focus only on information that would be useful for future conversations + 4. 
Context_tags should be specific enough to match similar situations but general enough to be reusable + + Output valid JSON in exactly this format: + { + "context_tags": [string], + "conversation_summary": string, + "what_worked": string, + "what_to_avoid": string + } + + Here is the conversation: + {conversation} + """) + + reflection = reflection_prompt | self.llm | JsonOutputParser() + memory = reflection.invoke({"conversation": conversation}) + + # Save to vector database + episodic_memory = self.vdb_client.collections.get("episodic_memory") + episodic_memory.data.insert({ + "conversation": conversation, + "context_tags": memory['context_tags'], + "conversation_summary": memory['conversation_summary'], + "what_worked": memory['what_worked'], + "what_to_avoid": memory['what_to_avoid'], + }) \ No newline at end of file diff --git a/agentic-memory/agentic_memory.txt b/agentic-memory/agentic_memory.txt new file mode 100644 index 0000000..5e694ae --- /dev/null +++ b/agentic-memory/agentic_memory.txt @@ -0,0 +1,2310 @@ +# Agent Memory - Can LLMs *Really* Think? + + + +*[Cognitive Architectures for Language Agents, 2024](https://arxiv.org/pdf/2309.02427)* + +LLMs are considered "stateless" in that every time you invoke an LLM call, it is like the first time it's ever seen the input being passed through. Given this quirk, multi-turn LLM agents have a unique challenge to overcome with fully understanding and navigating a vast world model which we humans do naturally. + +Being a human has a lot of advantages over a language model when executing a task. We bring our general knowledge about the world and lived experience, our understanding of prior similar task experiences and their takeaways, what we've specifically learned how to do or been taught, and then our ability to instantly contextualize and shape our approach to a task as we're working through it. In essence, we have advanced memory and the ability to learn from and apply learnings to new experiences. 
+ +LLMs sort of have some memory, mostly their general knowledge or traits picked up from training and additional fine tuning but suffer from a lack of the other characteristics outlined prior. To compensate for this, we can model different forms of memory, recall, and learning within our agentic system design. Specifically, we'll create a simple RAG agent to model 4 kinds of memory: + +- **Working Memory** - Current conversation and immediate context +- **Episodic Memory** - Historical experiences and their takeaways +- **Semantic Memory** - Knowledge context and factual grounding +- **Procedural Memory** - The "rules" and "skills" for interaction + +These four memory systems provide a holistic approach to understanding and architecting a part of cognitive design into an agent application. In this notebook we'll break down each type of memory and an example approach to implementing them into a whole agent experience. + +--- + +## Setting Up Dependencies + +```python +from pydantic import BaseModel, Field +from typing import List, Set, Optional +from datetime import datetime +from langchain_openai import ChatOpenAI +from langchain_core.messages import HumanMessage, SystemMessage +from langchain_core.prompts import ChatPromptTemplate +from langchain_core.output_parsers import JsonOutputParser +import weaviate +``` + +## Memory Models + +```python +class Message(BaseModel): + role: str + content: str + timestamp: datetime = Field(default_factory=datetime.now) + +class WorkingMemory(BaseModel): + messages: List[Message] = [] + system_prompt: str + " {\n", + " \"role\": \"system\",\n", + " \"content\": \"You are a helpful AI Assistant.\",\n", + " },\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": \"Hello, how are you?\",\n", + " },\n", + " {\n", + " \"role\": \"assistant\",\n", + " \"content\": \"I'm doing well, thank you for asking.\",\n", + " },\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": \"Can you tell me a joke?\",\n", + " }\n", + "])\n", 
+ "```\n", + "\n", + "Remembering from working memory involves direct access to recent contextual data and action/result pairs. The system leverages immediate accessibility to maintain conversational coherence through continuous monitoring of the active message history, current state parameters, and ongoing computational processes. This direct access enables appropriate response generation grounded in the immediate conversational context.\n", + "\n", + "Learning in working memory operates through continuous state updates during conversational processing. The system dynamically integrates new messages into the active context, updates state representations, modifies goal parameters, and maintains temporal coherence across the interaction. This real-time learning process differs fundamentally from the persistent storage mechanisms of episodic and semantic memory systems.\n", + "\n", + "Working memory functions as the active computational interface, coordinating information flow between episodic experience retrieval and semantic knowledge access while maintaining precise state awareness of the current interaction.\n", + "\n", + "\n", + "\n", + "Looking at a simple example:" + ] + }, + { + "cell_type": "markdown", + "id": "8e7d5cc0-35d8-4f94-a68e-d0876417de7b", + "metadata": {}, + "source": [ + "**Instantiate the Language Model**" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "3145ac1f-81bf-4aa0-a542-1ebc24c2ac1e", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_openai import ChatOpenAI\n", + "\n", + "llm = ChatOpenAI(temperature=0.7, model=\"gpt-4o\")" + ] + }, + { + "cell_type": "markdown", + "id": "a83ca92a-70c4-4d94-afcd-4910d8a83bb2", + "metadata": {}, + "source": [ + "**Create Simple Back & Forth Chat Flow**" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "5f3d3fa7-2109-449b-95f6-077b10622704", + "metadata": {}, + "outputs": [ + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + 
"User: Hello!\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI Message: Hello! How can I assist you today?\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: What's my name\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI Message: I'm sorry, but I don't have access to personal information, so I don't know your name.\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: Oh my name is Adam!\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI Message: Nice to meet you, Adam! How can I help you today?\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: What's my name?\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI Message: Your name is Adam.\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: exit\n" + ] + } + ], + "source": [ + "from langchain_core.messages import HumanMessage, SystemMessage\n", + "\n", + "# Define System Prompt\n", + "system_prompt = SystemMessage(\"You are a helpful AI Assistant. 
Answer the User's queries succinctly in one sentence.\")\n", + "\n", + "# Start Storage for Historical Message History\n", + "messages = [system_prompt]\n", + "\n", + "while True:\n", + "\n", + " # Get User's Message\n", + " user_message = HumanMessage(input(\"\\nUser: \"))\n", + " \n", + " if user_message.content.lower() == \"exit\":\n", + " break\n", + "\n", + " else:\n", + " # Extend Messages List With User Message\n", + " messages.append(user_message)\n", + "\n", + " # Pass Entire Message Sequence to LLM to Generate Response\n", + " response = llm.invoke(messages)\n", + " \n", + " print(\"\\nAI Message: \", response.content)\n", + "\n", + " # Add AI's Response to Message List\n", + " messages.append(response)" + ] + }, + { + "cell_type": "markdown", + "id": "91f0bb18-730e-4bf7-bc91-d60f60c28d32", + "metadata": {}, + "source": [ + "Keeping track of our total conversation allows the LLM to use prior messages and interactions as context for immediate responses during an ongoing conversation, keeping our current interaction in working memory and recalling working memory through attaching it as context for subsequent response generations. " + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "61dddc2a-9ad6-49e5-8e1a-1cfad3f358da", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Message 1 - SYSTEM: You are a helpful AI Assistant. Answer the User's queries succinctly in one sentence.\n", + "\n", + "Message 2 - HUMAN: Hello!\n", + "\n", + "Message 3 - AI: Hello! How can I assist you today?\n", + "\n", + "Message 4 - HUMAN: What's my name\n", + "\n", + "Message 5 - AI: I'm sorry, but I don't have access to personal information, so I don't know your name.\n", + "\n", + "Message 6 - HUMAN: Oh my name is Adam!\n", + "\n", + "Message 7 - AI: Nice to meet you, Adam! 
How can I help you today?\n", + "\n", + "Message 8 - HUMAN: What's my name?\n", + "\n", + "Message 9 - AI: Your name is Adam.\n" + ] + } + ], + "source": [ + "# Looking into our Memory\n", + "\n", + "for i in range(len(messages)):\n", + " print(f\"\\nMessage {i+1} - {messages[i].type.upper()}: \", messages[i].content)\n", + " i += 1" + ] + }, + { + "cell_type": "markdown", + "id": "5867cf90-fafd-444e-bb0b-f9b52be7836d", + "metadata": {}, + "source": [ + "---\n", + "## Episodic Memory\n", + "\n", + "\n", + "\n", + "*[Tell me why: the missing w in episodic memory’s what, where, and when](https://link.springer.com/article/10.3758/s13415-024-01234-4)*\n", + "\n", + "Episodic memory is a historical collection of prior experiences, or episodes. This can be both the literal recollection of how something happened and also any non-explicitly stated takeaways. When encountering a specific situation, you may recall similar related events that you've been in and their outcomes, which shape the way we approach new, comparable experiences.\n", + "\n", + "For a chatbot, this includes both raw conversations it has participated in and the analytical understanding gained from those interactions. The act of remembering is implemented through dynamic [few-shot prompting](https://www.promptingguide.ai/techniques/fewshot), automatically providing similar successful examples and instructions to better guide an LLM's response on subsequent similar queries.\n", + "\n", + "But we don't just recall similar experiences - we also extract takeaways (or learning) from interactions. Learning in episodic memory happens through two processes: automatic storage of complete conversations, and generation of post-conversation analysis. The system stores full interaction sequences while implementing reflection protocols to identify what worked, what didn't, and what can be learned for future situations. 
This dual approach enables both specific recall and strategic learning for future conversations.\n", + "\n", + "\n", + "\n", + "Episodic memory serves as the system's experiential foundation, allowing it to adapt its behavior based on accumulated conversation history while maintaining access to proven interaction patterns and their associated learnings. This creates a continuously improving system that learns not just from individual interactions, but from the patterns and insights derived across multiple conversations.\n", + "\n", + "Let's implement this reflection, storage and retrieval:" + ] + }, + { + "cell_type": "markdown", + "id": "41446e2b-ace3-4221-a64e-f76d5079f07b", + "metadata": {}, + "source": [ + "**Creating a Reflection Chain**\n", + "\n", + "This is where historical messages can be input, and episodic memories will be output. Given a message history, you will receive\n", + "\n", + "```python\n", + "{\n", + " \"context_tags\": [ # 2-4 keywords that would help identify similar future conversations\n", + " string, # Use field-specific terms like \"deep_learning\", \"methodology_question\", \"results_interpretation\"\n", + " ...\n", + " ],\n", + " \"conversation_summary\": string, # One sentence describing what the conversation accomplished\n", + " \"what_worked\": string, # Most effective approach or strategy used in this conversation\n", + " \"what_to_avoid\": string # Most important pitfall or ineffective approach to avoid\n", + "}\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "c234c48f-54d9-4bc7-b620-88c7c38d1a4e", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_core.output_parsers import JsonOutputParser\n", + "\n", + "reflection_prompt_template = \"\"\"\n", + "You are analyzing conversations about research papers to create memories that will help guide future interactions. 
Your task is to extract key elements that would be most helpful when encountering similar academic discussions in the future.\n", + "\n", + "Review the conversation and create a memory reflection following these rules:\n", + "\n", + "1. For any field where you don't have enough information or the field isn't relevant, use \"N/A\"\n", + "2. Be extremely concise - each string should be one clear, actionable sentence\n", + "3. Focus only on information that would be useful for handling similar future conversations\n", + "4. Context_tags should be specific enough to match similar situations but general enough to be reusable\n", + "\n", + "Output valid JSON in exactly this format:\n", + "{{\n", + " \"context_tags\": [ // 2-4 keywords that would help identify similar future conversations\n", + " string, // Use field-specific terms like \"deep_learning\", \"methodology_question\", \"results_interpretation\"\n", + " ...\n", + " ],\n", + " \"conversation_summary\": string, // One sentence describing what the conversation accomplished\n", + " \"what_worked\": string, // Most effective approach or strategy used in this conversation\n", + " \"what_to_avoid\": string // Most important pitfall or ineffective approach to avoid\n", + "}}\n", + "\n", + "Examples:\n", + "- Good context_tags: [\"transformer_architecture\", \"attention_mechanism\", \"methodology_comparison\"]\n", + "- Bad context_tags: [\"machine_learning\", \"paper_discussion\", \"questions\"]\n", + "\n", + "- Good conversation_summary: \"Explained how the attention mechanism in the BERT paper differs from traditional transformer architectures\"\n", + "- Bad conversation_summary: \"Discussed a machine learning paper\"\n", + "\n", + "- Good what_worked: \"Using analogies from matrix multiplication to explain attention score calculations\"\n", + "- Bad what_worked: \"Explained the technical concepts well\"\n", + "\n", + "- Good what_to_avoid: \"Diving into mathematical formulas before establishing user's familiarity 
with linear algebra fundamentals\"\n", + "- Bad what_to_avoid: \"Used complicated language\"\n", + "\n", + "Additional examples for different research scenarios:\n", + "\n", + "Context tags examples:\n", + "- [\"experimental_design\", \"control_groups\", \"methodology_critique\"]\n", + "- [\"statistical_significance\", \"p_value_interpretation\", \"sample_size\"]\n", + "- [\"research_limitations\", \"future_work\", \"methodology_gaps\"]\n", + "\n", + "Conversation summary examples:\n", + "- \"Clarified why the paper's cross-validation approach was more robust than traditional hold-out methods\"\n", + "- \"Helped identify potential confounding variables in the study's experimental design\"\n", + "\n", + "What worked examples:\n", + "- \"Breaking down complex statistical concepts using visual analogies and real-world examples\"\n", + "- \"Connecting the paper's methodology to similar approaches in related seminal papers\"\n", + "\n", + "What to avoid examples:\n", + "- \"Assuming familiarity with domain-specific jargon without first checking understanding\"\n", + "- \"Over-focusing on mathematical proofs when the user needed intuitive understanding\"\n", + "\n", + "Do not include any text outside the JSON object in your response.\n", + "\n", + "Here is the prior conversation:\n", + "\n", + "{conversation}\n", + "\"\"\"\n", + "\n", + "reflection_prompt = ChatPromptTemplate.from_template(reflection_prompt_template)\n", + "\n", + "reflect = reflection_prompt | llm | JsonOutputParser()" + ] + }, + { + "cell_type": "markdown", + "id": "16d823b6-2752-4bb1-9e54-9fcb29fc78c7", + "metadata": {}, + "source": [ + "**Format Conversation Helper Function**\n", + "\n", + "Cleans up the conversation by removing the system prompt, effectively only returning a string of the relevant conversation" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "380b3302-cd69-4e4a-9b44-eaf3209394bc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", 
+ "text": [ + "HUMAN: Hello!\n", + "AI: Hello! How can I assist you today?\n", + "HUMAN: What's my name\n", + "AI: I'm sorry, but I don't have access to personal information, so I don't know your name.\n", + "HUMAN: Oh my name is Adam!\n", + "AI: Nice to meet you, Adam! How can I help you today?\n", + "HUMAN: What's my name?\n", + "AI: Your name is Adam.\n" + ] + } + ], + "source": [ + "def format_conversation(messages):\n", + " \n", + " # Create an empty list placeholder\n", + " conversation = []\n", + " \n", + " # Start from index 1 to skip the first system message\n", + " for message in messages[1:]:\n", + " conversation.append(f\"{message.type.upper()}: {message.content}\")\n", + " \n", + " # Join with newlines\n", + " return \"\\n\".join(conversation)\n", + "\n", + "conversation = format_conversation(messages)\n", + "\n", + "print(conversation)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "ce260e05-fc34-4e89-8c0a-9dc288888da2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'context_tags': ['personal_information', 'name_recollection'], 'conversation_summary': \"Recalled the user's name after being informed in the conversation.\", 'what_worked': \"Storing and recalling the user's name effectively within the session.\", 'what_to_avoid': 'N/A'}\n" + ] + } + ], + "source": [ + "reflection = reflect.invoke({\"conversation\": conversation})\n", + "\n", + "print(reflection)" + ] + }, + { + "cell_type": "markdown", + "id": "c36e9655-ab73-4cb8-86fb-5b16d2802320", + "metadata": {}, + "source": [ + "**Setting Up our Database**\n", + "\n", + "This will act as our memory store, both for \"remembering\" and for \"recalling\". \n", + "\n", + "We will be using [weviate](https://weaviate.io/) with [ollama embeddings](https://ollama.com/library/nomic-embed-text) running in a docker container. 
See [docker-compose.yml](./docker-compose.yml) for additional details" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "abc980dd-3c93-465c-b074-f177b73b760f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Connected to Weviate: True\n" + ] + } + ], + "source": [ + "import weaviate\n", + "\n", + "vdb_client = weaviate.connect_to_local()\n", + "print(\"Connected to Weviate: \", vdb_client.is_ready())" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "6264ee17-46c3-4d01-9aff-3601af0817f8", + "metadata": { + "jupyter": { + "source_hidden": true + } + }, + "outputs": [], + "source": [ + "# vdb_client.collections.delete(\"episodic_memory\")" + ] + }, + { + "cell_type": "markdown", + "id": "2319ecab-944c-47b9-8dc7-8fdf4b29329c", + "metadata": {}, + "source": [ + "**Create an Episodic Memory Collection**\n", + "\n", + "These are the individual memories that we'll be able to search over. \n", + "\n", + "We note down `conversation`, `context_tags`, `conversation_summary`, `what_worked`, and `what_to_avoid` for each entry" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "be7224de-f9a5-4822-9caf-2151088b351a", + "metadata": {}, + "outputs": [], + "source": [ + "from weaviate.classes.config import Property, DataType, Configure, Tokenization\n", + "\n", + "vdb_client.collections.create(\n", + " name=\"episodic_memory\",\n", + " description=\"Collection containing historical chat interactions and takeaways.\",\n", + " vectorizer_config=[\n", + " Configure.NamedVectors.text2vec_ollama(\n", + " name=\"title_vector\",\n", + " source_properties=[\"title\"],\n", + " api_endpoint=\"http://host.docker.internal:11434\", # If using Docker, use this to contact your local Ollama instance\n", + " model=\"nomic-embed-text\",\n", + " )\n", + " ],\n", + " properties=[\n", + " Property(name=\"conversation\", data_type=DataType.TEXT),\n", + " Property(name=\"context_tags\", 
data_type=DataType.TEXT_ARRAY),\n", + " Property(name=\"conversation_summary\", data_type=DataType.TEXT),\n", + " Property(name=\"what_worked\", data_type=DataType.TEXT),\n", + " Property(name=\"what_to_avoid\", data_type=DataType.TEXT),\n", + " \n", + " ]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "011fb9c5-54f5-4aeb-9334-91e4f6a91f5d", + "metadata": {}, + "source": [ + "**Helper Function for Remembering an Episodic Memory**\n", + "\n", + "Takes in a conversation, creates a reflection, then adds it to the database collection" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "07fa1ec8-f252-497e-a88c-ebdf5d199298", + "metadata": {}, + "outputs": [], + "source": [ + "def add_episodic_memory(messages, vdb_client):\n", + "\n", + " # Format Messages\n", + " conversation = format_conversation(messages)\n", + "\n", + " # Create Reflection\n", + " reflection = reflect.invoke({\"conversation\": conversation})\n", + "\n", + " # Load Database Collection\n", + " episodic_memory = vdb_client.collections.get(\"episodic_memory\")\n", + "\n", + " # Insert Entry Into Collection\n", + " episodic_memory.data.insert({\n", + " \"conversation\": conversation,\n", + " \"context_tags\": reflection['context_tags'],\n", + " \"conversation_summary\": reflection['conversation_summary'],\n", + " \"what_worked\": reflection['what_worked'],\n", + " \"what_to_avoid\": reflection['what_to_avoid'],\n", + " })\n", + "\n", + "# add_episodic_memory(messages, vdb_client)" + ] + }, + { + "cell_type": "markdown", + "id": "ace1b0fd-8336-4a5f-88a0-1ec4f458a2ec", + "metadata": {}, + "source": [ + "**Episodic Memory Remembering/Recall Function**\n", + "\n", + "Queries our episodic memory collection and return's back the most relevant result using hybrid semantic & BM25 search." 
def episodic_recall(query, vdb_client):
    """Fetch the single most relevant episodic memory for `query`.

    Performs an equally weighted hybrid (semantic + BM25) search over the
    "episodic_memory" collection and returns the raw Weaviate query
    response containing at most one object.
    """
    collection = vdb_client.collections.get("episodic_memory")
    return collection.query.hybrid(query=query, alpha=0.5, limit=1)
"def episodic_system_prompt(query, vdb_client):\n", + " # Get new memory\n", + " memory = episodic_recall(query, vdb_client)\n", + "\n", + " current_conversation = memory.objects[0].properties['conversation']\n", + " # Update memory stores, excluding current conversation from history\n", + " if current_conversation not in conversations:\n", + " conversations.append(current_conversation)\n", + " # conversations.append(memory.objects[0].properties['conversation'])\n", + " what_worked.update(memory.objects[0].properties['what_worked'].split('. '))\n", + " what_to_avoid.update(memory.objects[0].properties['what_to_avoid'].split('. '))\n", + "\n", + " # Get previous conversations excluding the current one\n", + " previous_convos = [conv for conv in conversations[-4:] if conv != current_conversation][-3:]\n", + " \n", + " # Create prompt with accumulated history\n", + " episodic_prompt = f\"\"\"You are a helpful AI Assistant. Answer the user's questions to the best of your ability.\n", + " You recall similar conversations with the user, here are the details:\n", + " \n", + " Current Conversation Match: {memory.objects[0].properties['conversation']}\n", + " Previous Conversations: {' | '.join(previous_convos)}\n", + " What has worked well: {' '.join(what_worked)}\n", + " What to avoid: {' '.join(what_to_avoid)}\n", + " \n", + " Use these memories as context for your response to the user.\"\"\"\n", + " \n", + " return SystemMessage(content=episodic_prompt)\n" + ] + }, + { + "cell_type": "markdown", + "id": "0639cf56-8864-48e8-900a-840751906fe4", + "metadata": {}, + "source": [ + "**Episodic Memory + Working Memory Demonstration**\n", + "\n", + "\n", + "\n", + "Current flow will:\n", + "1. Take a user's message\n", + "2. Create a system prompt with relevant Episodic enrichment\n", + "3. Reconstruct the entire working memory to update the system prompt and attach the new message to the end\n", + "4. 
Generate a response with the LLM" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "id": "405bf2cc-e8be-4e6d-be67-536bec8a3636", + "metadata": {}, + "outputs": [ + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: What's my name\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI Message: You said your name is Adam.\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: what's my favorite food\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI Message: You mentioned that your favorite food is chocolate lava cakes.\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: what's my name?\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI Message: Your name is Adam.\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: exit_quiet\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + " == Conversation Exited ==\n" + ] + } + ], + "source": [ + "# Simple storage for accumulated memories\n", + "conversations = []\n", + "what_worked = set()\n", + "what_to_avoid = set()\n", + "\n", + "# Start Storage for Historical Message History\n", + "messages = []\n", + "\n", + "while True:\n", + " # Get User's Message\n", + " user_input = input(\"\\nUser: \")\n", + " user_message = HumanMessage(content=user_input)\n", + " \n", + " # Generate new system prompt\n", + " system_prompt = episodic_system_prompt(user_input, vdb_client)\n", + " \n", + " # Reconstruct messages list with new system prompt first\n", + " messages = [\n", + " system_prompt, # New system prompt always first\n", + " *[msg for msg in messages if not isinstance(msg, SystemMessage)] # Old messages except system\n", + " ]\n", + " \n", + " if user_input.lower() == \"exit\":\n", + " add_episodic_memory(messages, vdb_client)\n", + " 
print(\"\\n == Conversation Stored in Episodic Memory ==\")\n", + " break\n", + " if user_input.lower() == \"exit_quiet\":\n", + " print(\"\\n == Conversation Exited ==\")\n", + " break\n", + " \n", + " # Add current user message\n", + " messages.append(user_message)\n", + " \n", + " # Pass Entire Message Sequence to LLM to Generate Response\n", + " response = llm.invoke(messages)\n", + " print(\"\\nAI Message: \", response.content)\n", + " \n", + " # Add AI's Response to Message List\n", + " messages.append(response)" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "id": "c726f9b7-10e3-49b5-9817-28f0e5f2c8e3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Message 1 - SYSTEM: You are a helpful AI Assistant. Answer the user's questions to the best of your ability.\n", + " You recall similar conversations with the user, here are the details:\n", + " \n", + " Current Conversation Match: HUMAN: Hello!\n", + "AI: Hello!\n", + "HUMAN: What's my favorite food?\n", + "AI: I don't have that information. What's your favorite food?\n", + "HUMAN: My favorite food is chocolate lava cakes!\n", + "AI: Yum, chocolate lava cakes are delicious!\n", + "HUMAN: What's my name?\n", + "AI: You said your name is Adam.\n", + " Previous Conversations: HUMAN: Hello!\n", + "AI: Hello!\n", + "\n", + "HUMAN: What's my name?\n", + "AI: I do not have access to that information.\n", + "\n", + "HUMAN: My name is Adam!\n", + "AI: It's nice to meet you, Adam!\n", + "\n", + "HUMAN: What is my name?\n", + "AI: You said your name is Adam.\n", + "\n", + " What has worked well: Directly asking the user for their preferences to gather necessary information. 
# Looking into our Memory: dump every message in the working transcript.
# The former `for i in range(len(messages))` loop contained an `i += 1`
# statement, which is a no-op inside a `for` loop (the loop variable is
# rebound on every iteration) — removed, and enumerate used instead.
for i, msg in enumerate(messages, start=1):
    print(f"\nMessage {i} - {msg.type.upper()}: ", msg.content)
In an AI system, this would be implemented through techniques like Retrieval Augmented Generation (RAG), where relevant information is dynamically pulled from a knowledge base to ground and inform responses.\n", + "\n", + "Learning in semantic memory involves expanding and refining the knowledge base - adding new information, updating existing entries, and broadening coverage of different topics. This could mean incorporating new documentation, updating technical specifications, or expanding the range of topics the system can knowledgeably discuss. The act of remembering then becomes a process of retrieving and synthesizing relevant information from this knowledge base to provide accurate and contextual responses.\n", + "\n", + "This semantic knowledge can then be combined with the current conversation context (working memory) and past similar interactions (episodic memory) to provide comprehensive, accurate, and contextually appropriate responses. The system not only knows what it's talking about (semantic memory) but can relate it to the current conversation (working memory) and past experiences (episodic memory)." + ] + }, + { + "cell_type": "markdown", + "id": "f4170025-86fa-402d-ae76-9454f62b77e0", + "metadata": {}, + "source": [ + "**Creating our Knowledgebase**\n", + "\n", + "For our semantic knowledge, we'll be chunking the [Cognitive Architectures for Language Agents paper](https://arxiv.org/pdf/2309.02427). This will become the facts and concepts that we will dynamically \"remember\"." 
from chunking_evaluation.chunking import RecursiveTokenChunker
from langchain_community.document_loaders import PyPDFLoader

# Load the CoALA paper page by page.
loader = PyPDFLoader("./CoALA_Paper.pdf")
pages = list(loader.load())

# Flatten all page contents into a single text blob before chunking.
document = " ".join(page.page_content for page in pages)

# Recursive character chunker: ~800-char chunks, no overlap, splitting on
# progressively finer separators.
recursive_character_chunker = RecursiveTokenChunker(
    chunk_size=800,
    chunk_overlap=0,
    length_function=len,
    separators=["\n\n", "\n", ".", "?", "!", " ", ""],
)

# Split the combined text
recursive_character_chunks = recursive_character_chunker.split_text(document)
individual chunks." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b3ef6d21-26e4-449c-b7a1-886924ef193b", + "metadata": {}, + "outputs": [], + "source": [ + "# vdb_client.collections.delete(\"CoALA_Paper\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f55bb421-4975-4af6-9e49-c1f73d7cc58c", + "metadata": {}, + "outputs": [], + "source": [ + "vdb_client.collections.create(\n", + " name=\"CoALA_Paper\",\n", + " description=\"Collection containing split chunks from the CoALA Paper\",\n", + " vectorizer_config=[\n", + " Configure.NamedVectors.text2vec_ollama(\n", + " name=\"title_vector\",\n", + " source_properties=[\"title\"],\n", + " api_endpoint=\"http://host.docker.internal:11434\", # If using Docker, use this to contact your local Ollama instance\n", + " model=\"nomic-embed-text\",\n", + " )\n", + " ],\n", + " properties=[\n", + " Property(name=\"chunk\", data_type=DataType.TEXT),\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "ad40e615-a3bc-4258-ba3a-a26d37fb9f4c", + "metadata": {}, + "source": [ + "**Inserting Chunked Paper into Collection**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c232e679-a0a2-4124-b29f-719f54ad5b26", + "metadata": {}, + "outputs": [], + "source": [ + "# Load Database Collection\n", + "coala_collection = vdb_client.collections.get(\"CoALA_Paper\")\n", + "\n", + "for chunk in recursive_character_chunks:\n", + " # Insert Entry Into Collection\n", + " coala_collection.data.insert({\n", + " \"chunk\": chunk,\n", + " })" + ] + }, + { + "cell_type": "markdown", + "id": "cebd7303-bebf-485a-964e-1ad09ad0adf3", + "metadata": {}, + "source": [ + "**Semantic Recall Function**\n", + "\n", + "This retrieval function queries our knowledgebase of the CoALA paper and combines all of the retrieved chunks into one large string." 
def semantic_recall(query, vdb_client):
    """Retrieve CoALA-paper chunks relevant to `query` as one labelled string.

    Runs a hybrid (semantic + BM25, alpha=0.5) search over the
    "CoALA_Paper" collection, takes up to 15 chunks, and concatenates them,
    each prefixed with a "\\nCHUNK <n>:\\n" header.

    Args:
        query: Free-text search query.
        vdb_client: Connected Weaviate client.

    Returns:
        A single string with every retrieved chunk, stripped and numbered.

    Note: the previous version carried a garbled leftover comment
    ("Add chunk separator except for first chunk if i > 0:") describing
    separator logic that was never implemented; the output here is
    unchanged — no extra separator is inserted between chunks.
    """
    # Load Database Collection
    coala_collection = vdb_client.collections.get("CoALA_Paper")

    # Hybrid Semantic/BM25 Retrieval
    memories = coala_collection.query.hybrid(
        query=query,
        alpha=0.5,
        limit=15,
    )

    # Build the combined context with str.join rather than quadratic `+=`.
    parts = [
        f"\nCHUNK {i + 1}:\n{memory.properties['chunk'].strip()}"
        for i, memory in enumerate(memories.objects)
    ]
    return "".join(parts)
The importance of long-term memory and procedural\n", + "CHUNK 2:\n", + "human, navigate a website) through grounding (Section 4.2).\n", + "•Internal actions interact with internal memories. Depending on which memory gets accessed and\n", + "whether the access is read or write, internal actions can be further decomposed into three kinds:\n", + "retrieval (read from long-term memory; Section 4.3), reasoning (update the short-term working\n", + "memory with LLM; Section 4.4), and learning (write to long-term memory; Section 4.5).\n", + "Language agents choose actions via decision-making , which follows a repeated cycle (Section 4.6, Figure 4B).\n", + "In each cycle, the agent can use reasoning and retrieval actions to plan. This planning subprocess selects a\n", + "grounding or learning action, which is executed to affect the outside world or the agent’s long-term memory.\n", + "CHUNK 3:\n", + "framework, learning is a result action of a decision-making cycle just like grounding: the agent deliberately\n", + "chooses to commit information to long-term memory. This is in contrast to most agents, which simply fix a\n", + "learning schedule and only use decison making for external actions. Biological agents, however, do not have\n", + "this luxury: they must balance learning against external actions in their lifetime, choosing when and what to\n", + "learn (Mattar and Daw, 2018). More flexible language agents (Wang et al., 2023a; Park et al., 2023) would\n", + "follow a similar design and treat learning on par with external actions. Learning could be proposed as a\n", + "possible action during regular decision-making, allowing the agent to “defer” it until the appropriate time.\n", + "CHUNK 4:\n", + "observations and actions. We categorize three kinds of external environments:\n", + "Physical environments . Physical embodiment is the oldest instantiation envisioned for AI agents (Nilsson,\n", + "1984). 
It involves processing perceptual inputs (visual, audio, tactile) into textual observations (e.g., via\n", + "pre-trained captioning models), and affecting the physical environments via robotic planners that take\n", + "language-based commands. Recent advances in LLMs have led to numerous robotic projects (Ahn et al., 2022;\n", + "Liang et al., 2023a; Singh et al., 2023; Palo et al., 2023; Ren et al., 2023) that leverage LLMs as a “brain”\n", + "for robots to generate actions or plans in the physical world. For perceptual input, vision-language models\n", + "CHUNK 5:\n", + "et al., 2023; Liu et al., 2023b). Integrated, multimodal reasoning may allow for more human-like behaviors: a\n", + "VLM-based agent could “see” a webpage, whereas a LLM-based agent would more likely be given raw HTML.\n", + "However, coupling the agent’s perception and reasoning systems makes the agent more domain-specific and\n", + "difficult to update. In either case, the basic architectural principles described by CoALA — internal memories,\n", + "a structured action space, and generalized decision-making — can be used to guide agent design.\n", + "Internal vs. external: what is the boundary between an agent and its environment? While\n", + "humans or robots are clearly distinct from their embodied environment, digital language agents have less\n", + "CHUNK 6:\n", + "Memory. Building on psychological theories, Soar uses several types of memory to track the agent’s\n", + "state (Atkinson and Shiffrin, 1968). Working memory (Baddeley and Hitch, 1974) reflects the agent’s current\n", + "circumstances: it stores the agent’s recent perceptual input, goals, and results from intermediate, internal\n", + "reasoning. Long term memory is divided into three distinct types. 
Procedural memory stores the production\n", + "system itself: the set of rules that can be applied to working memory to determine the agent’s behavior.\n", + "Semantic memory stores facts about the world (Lindes and Laird, 2016), while episodic memory stores\n", + "sequences of the agent’s past behaviors (Nuxoll and Laird, 2007).\n", + "Grounding. Soar can be instantiated in simulations (Tambe et al., 1995; Jones et al., 1999) or real-world\n", + "CHUNK 7:\n", + "helpful for the agent to have semantic memory containing the set of items for sale, as well as episodic\n", + "memory about each customer’s previous purchases and interactions. It will need procedural memory\n", + "defining functions to query these datastores, as well as working memory to track the dialogue state.\n", + "•Define the agent’s internal action space. This consists primarily of defining read and write\n", + "access to each of the agent’s memory modules. In our example, the agent should have read and write\n", + "access to episodic memory (so it can store new interactions with customers), but read-only access to\n", + "semantic and procedural memory (since it should not update the inventory or its own code).\n", + "•Define the decision-making procedure. This step specifies how reasoning and retrieval actions\n", + "CHUNK 8:\n", + "Semantic memory . Semantic memory stores an agent’s knowledge about the world and itself. Traditional\n", + "NLP or RL approaches that leverage retrieval for reasoning or decision-making initialize semantic memory\n", + "from an external database for knowledge support. For example, retrieval-augmented methods in NLP (Lewis\n", + "et al., 2020; Borgeaud et al., 2022; Chen et al., 2017) can be viewed as retrieving from a semantic memory of\n", + "9 Published in Transactions on Machine Learning Research (02/2024)\n", + "unstructured text (e.g., Wikipedia). 
In RL, “reading to learn” approaches (Branavan et al., 2012; Narasimhan\n", + "et al., 2018; Hanjie et al., 2021; Zhong et al., 2021) leverage game manuals and facts as a semantic memory\n", + "CHUNK 9:\n", + "reflecting on episodic memory to generate new semantic inferences (Shinn et al., 2023) or modifying their\n", + "7 Published in Transactions on Machine Learning Research (02/2024)\n", + "Decision Procedure\n", + "ObservationsRetrieval Parse Prompt/gid00035\n", + "ProposalObservation\n", + "Evaluation\n", + "Selection\n", + "Execution/gid00034\n", + "LearningPlanning\n", + "Agent Code LLM\n", + "Procedural Memory Semantic Memory Episodic Memory\n", + "Dialogue Physical Digital\n", + "Working Memory\n", + "ActionsLearning Learning Retrieval Retrieval\n", + "Reasoning\n", + "Figure 4: Cognitive architectures for language agents (CoALA). A: CoALA defines a set of interacting\n", + "modules and processes. The decision procedure executes the agent’s source code. This source code consists\n", + "of procedures to interact with the LLM (prompt templates and parsers), internal memories (retrieval and\n", + "CHUNK 10:\n", + "LLM can be accessed via reasoning actions, and various code-based procedures can be retrieved and executed.\n", + "Unlike episodic or semantic memory that may be initially empty or even absent, procedural memory must be\n", + "initialized by the designer with proper code to bootstrap the agent. Finally, while learning new actions by\n", + "writing to procedural memory is possible (Section 4.5), it is significantly riskier than writing to episodic or\n", + "semantic memory, as it can easily introduce bugs or allow an agent to subvert its designers’ intentions.\n", + "4.2 Grounding actions\n", + "Grounding procedures execute external actions and process environmental feedback into working memory as\n", + "text. 
This effectively simplifies the agent’s interaction with the outside world as a “text game” with textual\n", + "CHUNK 11:\n", + "reasoning or retrieved from long-term memory), and other core information carried over from the previous\n", + "decision cycle (e.g., agent’s active goals). Previous methods encourage the LLM to generate intermediate\n", + "reasoning (Wei et al., 2022b; Nye et al., 2021), using the LLM’s own context as a form of working memory.\n", + "CoALA’s notion of working memory is more general: it is a data structure that persists across LLM calls.\n", + "On each LLM call, the LLM input is synthesized from a subset of working memory (e.g., a prompt template\n", + "and relevant variables). The LLM output is then parsed back into other variables (e.g., an action name\n", + "and arguments) which are stored back in working memory and used to execute the corresponding action\n", + "CHUNK 12:\n", + "a set of productions is used to generate and rank a candidate set of possible actions.∗The best action is\n", + "then chosen.†Another set of productions is then used to implement the action – for example, modifying the\n", + "contents of working memory or issuing a motor command.\n", + "Learning. Soar supports multiple modes of learning. First, new information can be stored directly in\n", + "long-term memory: facts can be written to semantic memory, while experiences can be written to episodic\n", + "memory (Derbinsky et al., 2012). This information can later be retrieved back into working memory when\n", + "needed for decision-making. Second, behaviors can be modified. Reinforcement learning (Sutton and Barto,\n", + "2018) can be used to up-weight productions that have yielded good outcomes, allowing the agent to learn\n", + "CHUNK 13:\n", + "writes to working memory. 
This allows the agent to summarize and distill insights about the most recent\n", + "observation (Yao et al., 2022b; Peng et al., 2023), the most recent trajectory (Shinn et al., 2023), or\n", + "information retrieved from long-term memory (Park et al., 2023). Reasoning can be used to support learning\n", + "(by writing the results into long-term memory) or decision-making (by using the results as additional context\n", + "for subsequent LLM calls).\n", + "4.5 Learning actions\n", + "Learning occurs by writing information to long-term memory, which includes a spectrum of diverse procedures.\n", + "Updating episodic memory with experience. It is common practice for RL agents to store episodic\n", + "trajectories to update a parametric policy (Blundell et al., 2016; Pritzel et al., 2017) or establish a non-\n", + "CHUNK 14:\n", + "(Figure 3A). Besides the LLM, the working memory also interacts with long-term memories and grounding\n", + "interfaces. It thus serves as the central hub connecting different components of a language agent.\n", + "Episodic memory . Episodic memory stores experience from earlier decision cycles. This can consist of\n", + "training input-output pairs (Rubin et al., 2021), history event flows (Weston et al., 2014; Park et al., 2023),\n", + "game trajectories from previous episodes (Yao et al., 2020; Tuyls et al., 2022), or other representations of\n", + "the agent’s experiences. During the planning stage of a decision cycle, these episodes may be retrieved into\n", + "working memory to support reasoning. An agent can also write new experiences from working to episodic\n", + "memory as a form of learning (Section 4.5).\n", + "CHUNK 15:\n", + "to affect the policy. 
While these examples essentially employ a fixed, read-only semantic memory, language\n", + "agents may also write new knowledge obtained from LLM reasoning into semantic memory as a form of\n", + "learning (Section 4.5) to incrementally build up world knowledge from experience.\n", + "Procedural memory . Language agents contain two forms of procedural memory: implicitknowledge stored\n", + "in the LLM weights, and explicitknowledge written in the agent’s code. The agent’s code can be further\n", + "divided into two types: procedures that implement actions (reasoning, retrieval, grounding, and learning\n", + "procedures), and procedures that implement decision-making itself (Section 4.6). During a decision cycle, the\n" + ] + } + ], + "source": [ + "memories = semantic_recall(\"What are the four kinds of memory\", vdb_client)\n", + "\n", + "print(memories)" + ] + }, + { + "cell_type": "markdown", + "id": "cb39fe60-c614-472c-894b-ea65d33d8d9e", + "metadata": {}, + "source": [ + "**Formatting the Semantic Memory**\n", + "\n", + "Attaching additional instructions along with the retrieved chunks. This will be an additional human message that we'll put in and out with every message, updating with the latest context retrieved from the database." 
def semantic_rag(query, vdb_client):
    """Wrap retrieved CoALA-paper context in a HumanMessage for the LLM.

    Pulls relevant chunks via semantic_recall and embeds them in an
    instruction telling the model to ground its next answer in that
    context, or to say when the context is insufficient.
    """
    retrieved_context = semantic_recall(query, vdb_client)

    grounding_instructions = f""" If needed, Use this grounded context to factually answer the next question.
    Let me know if you do not have enough information or context to answer a question.
    
    {retrieved_context}
    """

    return HumanMessage(grounding_instructions)
Depending on which memory gets accessed and\\nwhether the access is read or write, internal actions can be further decomposed into three kinds:\\nretrieval (read from long-term memory; Section 4.3), reasoning (update the short-term working\\nmemory with LLM; Section 4.4), and learning (write to long-term memory; Section 4.5).\\nLanguage agents choose actions via decision-making , which follows a repeated cycle (Section 4.6, Figure 4B).\\nIn each cycle, the agent can use reasoning and retrieval actions to plan. This planning subprocess selects a\\ngrounding or learning action, which is executed to affect the outside world or the agent’s long-term memory.\\nCHUNK 3:\\nframework, learning is a result action of a decision-making cycle just like grounding: the agent deliberately\\nchooses to commit information to long-term memory. This is in contrast to most agents, which simply fix a\\nlearning schedule and only use decison making for external actions. Biological agents, however, do not have\\nthis luxury: they must balance learning against external actions in their lifetime, choosing when and what to\\nlearn (Mattar and Daw, 2018). More flexible language agents (Wang et al., 2023a; Park et al., 2023) would\\nfollow a similar design and treat learning on par with external actions. Learning could be proposed as a\\npossible action during regular decision-making, allowing the agent to “defer” it until the appropriate time.\\nCHUNK 4:\\nobservations and actions. We categorize three kinds of external environments:\\nPhysical environments . Physical embodiment is the oldest instantiation envisioned for AI agents (Nilsson,\\n1984). It involves processing perceptual inputs (visual, audio, tactile) into textual observations (e.g., via\\npre-trained captioning models), and affecting the physical environments via robotic planners that take\\nlanguage-based commands. 
Recent advances in LLMs have led to numerous robotic projects (Ahn et al., 2022;\\nLiang et al., 2023a; Singh et al., 2023; Palo et al., 2023; Ren et al., 2023) that leverage LLMs as a “brain”\\nfor robots to generate actions or plans in the physical world. For perceptual input, vision-language models\\nCHUNK 5:\\net al., 2023; Liu et al., 2023b). Integrated, multimodal reasoning may allow for more human-like behaviors: a\\nVLM-based agent could “see” a webpage, whereas a LLM-based agent would more likely be given raw HTML.\\nHowever, coupling the agent’s perception and reasoning systems makes the agent more domain-specific and\\ndifficult to update. In either case, the basic architectural principles described by CoALA — internal memories,\\na structured action space, and generalized decision-making — can be used to guide agent design.\\nInternal vs. external: what is the boundary between an agent and its environment? While\\nhumans or robots are clearly distinct from their embodied environment, digital language agents have less\\nCHUNK 6:\\nMemory. Building on psychological theories, Soar uses several types of memory to track the agent’s\\nstate (Atkinson and Shiffrin, 1968). Working memory (Baddeley and Hitch, 1974) reflects the agent’s current\\ncircumstances: it stores the agent’s recent perceptual input, goals, and results from intermediate, internal\\nreasoning. Long term memory is divided into three distinct types. Procedural memory stores the production\\nsystem itself: the set of rules that can be applied to working memory to determine the agent’s behavior.\\nSemantic memory stores facts about the world (Lindes and Laird, 2016), while episodic memory stores\\nsequences of the agent’s past behaviors (Nuxoll and Laird, 2007).\\nGrounding. 
Soar can be instantiated in simulations (Tambe et al., 1995; Jones et al., 1999) or real-world\\nCHUNK 7:\\nhelpful for the agent to have semantic memory containing the set of items for sale, as well as episodic\\nmemory about each customer’s previous purchases and interactions. It will need procedural memory\\ndefining functions to query these datastores, as well as working memory to track the dialogue state.\\n•Define the agent’s internal action space. This consists primarily of defining read and write\\naccess to each of the agent’s memory modules. In our example, the agent should have read and write\\naccess to episodic memory (so it can store new interactions with customers), but read-only access to\\nsemantic and procedural memory (since it should not update the inventory or its own code).\\n•Define the decision-making procedure. This step specifies how reasoning and retrieval actions\\nCHUNK 8:\\nSemantic memory . Semantic memory stores an agent’s knowledge about the world and itself. Traditional\\nNLP or RL approaches that leverage retrieval for reasoning or decision-making initialize semantic memory\\nfrom an external database for knowledge support. For example, retrieval-augmented methods in NLP (Lewis\\net al., 2020; Borgeaud et al., 2022; Chen et al., 2017) can be viewed as retrieving from a semantic memory of\\n9 Published in Transactions on Machine Learning Research (02/2024)\\nunstructured text (e.g., Wikipedia). 
In RL, “reading to learn” approaches (Branavan et al., 2012; Narasimhan\\net al., 2018; Hanjie et al., 2021; Zhong et al., 2021) leverage game manuals and facts as a semantic memory\\nCHUNK 9:\\nreflecting on episodic memory to generate new semantic inferences (Shinn et al., 2023) or modifying their\\n7 Published in Transactions on Machine Learning Research (02/2024)\\nDecision Procedure\\nObservationsRetrieval Parse Prompt/gid00035\\nProposalObservation\\nEvaluation\\nSelection\\nExecution/gid00034\\nLearningPlanning\\nAgent Code LLM\\nProcedural Memory Semantic Memory Episodic Memory\\nDialogue Physical Digital\\nWorking Memory\\nActionsLearning Learning Retrieval Retrieval\\nReasoning\\nFigure 4: Cognitive architectures for language agents (CoALA). A: CoALA defines a set of interacting\\nmodules and processes. The decision procedure executes the agent’s source code. This source code consists\\nof procedures to interact with the LLM (prompt templates and parsers), internal memories (retrieval and\\nCHUNK 10:\\nLLM can be accessed via reasoning actions, and various code-based procedures can be retrieved and executed.\\nUnlike episodic or semantic memory that may be initially empty or even absent, procedural memory must be\\ninitialized by the designer with proper code to bootstrap the agent. Finally, while learning new actions by\\nwriting to procedural memory is possible (Section 4.5), it is significantly riskier than writing to episodic or\\nsemantic memory, as it can easily introduce bugs or allow an agent to subvert its designers’ intentions.\\n4.2 Grounding actions\\nGrounding procedures execute external actions and process environmental feedback into working memory as\\ntext. This effectively simplifies the agent’s interaction with the outside world as a “text game” with textual\\nCHUNK 11:\\nreasoning or retrieved from long-term memory), and other core information carried over from the previous\\ndecision cycle (e.g., agent’s active goals). 
Previous methods encourage the LLM to generate intermediate\\nreasoning (Wei et al., 2022b; Nye et al., 2021), using the LLM’s own context as a form of working memory.\\nCoALA’s notion of working memory is more general: it is a data structure that persists across LLM calls.\\nOn each LLM call, the LLM input is synthesized from a subset of working memory (e.g., a prompt template\\nand relevant variables). The LLM output is then parsed back into other variables (e.g., an action name\\nand arguments) which are stored back in working memory and used to execute the corresponding action\\nCHUNK 12:\\na set of productions is used to generate and rank a candidate set of possible actions.∗The best action is\\nthen chosen.†Another set of productions is then used to implement the action – for example, modifying the\\ncontents of working memory or issuing a motor command.\\nLearning. Soar supports multiple modes of learning. First, new information can be stored directly in\\nlong-term memory: facts can be written to semantic memory, while experiences can be written to episodic\\nmemory (Derbinsky et al., 2012). This information can later be retrieved back into working memory when\\nneeded for decision-making. Second, behaviors can be modified. Reinforcement learning (Sutton and Barto,\\n2018) can be used to up-weight productions that have yielded good outcomes, allowing the agent to learn\\nCHUNK 13:\\nwrites to working memory. This allows the agent to summarize and distill insights about the most recent\\nobservation (Yao et al., 2022b; Peng et al., 2023), the most recent trajectory (Shinn et al., 2023), or\\ninformation retrieved from long-term memory (Park et al., 2023). 
Reasoning can be used to support learning\\n(by writing the results into long-term memory) or decision-making (by using the results as additional context\\nfor subsequent LLM calls).\\n4.5 Learning actions\\nLearning occurs by writing information to long-term memory, which includes a spectrum of diverse procedures.\\nUpdating episodic memory with experience. It is common practice for RL agents to store episodic\\ntrajectories to update a parametric policy (Blundell et al., 2016; Pritzel et al., 2017) or establish a non-\\nCHUNK 14:\\n(Figure 3A). Besides the LLM, the working memory also interacts with long-term memories and grounding\\ninterfaces. It thus serves as the central hub connecting different components of a language agent.\\nEpisodic memory . Episodic memory stores experience from earlier decision cycles. This can consist of\\ntraining input-output pairs (Rubin et al., 2021), history event flows (Weston et al., 2014; Park et al., 2023),\\ngame trajectories from previous episodes (Yao et al., 2020; Tuyls et al., 2022), or other representations of\\nthe agent’s experiences. During the planning stage of a decision cycle, these episodes may be retrieved into\\nworking memory to support reasoning. An agent can also write new experiences from working to episodic\\nmemory as a form of learning (Section 4.5).\\nCHUNK 15:\\nto affect the policy. While these examples essentially employ a fixed, read-only semantic memory, language\\nagents may also write new knowledge obtained from LLM reasoning into semantic memory as a form of\\nlearning (Section 4.5) to incrementally build up world knowledge from experience.\\nProcedural memory . Language agents contain two forms of procedural memory: implicitknowledge stored\\nin the LLM weights, and explicitknowledge written in the agent’s code. 
The agent’s code can be further\\ndivided into two types: procedures that implement actions (reasoning, retrieval, grounding, and learning\\nprocedures), and procedures that implement decision-making itself (Section 4.6). During a decision cycle, the\\n ' additional_kwargs={} response_metadata={}\n" + ] + } + ], + "source": [ + "message = semantic_rag(\"What are the four kinds of memory\", vdb_client)\n", + "print(message)" + ] + }, + { + "cell_type": "markdown", + "id": "d2dca139-d46b-4e3f-be66-80d342d12551", + "metadata": {}, + "source": [ + "**Semantic Memory with Episodic and Working Memory Demonstration**\n", + "\n", + "\n", + "\n", + "Current flow will:\n", + "\n", + "1. Take a user's message\n", + "2. Create a system prompt with relevant Episodic enrichment\n", + "3. Create a Semantic memory message with context from the database\n", + "4. Reconstruct the entire working memory to update the system prompt and attach the semantic memory and new user messages to the end\n", + "5. Generate a response with the LLM" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "id": "a70d7380-62bc-40ef-97e1-355b647c45a4", + "metadata": {}, + "outputs": [ + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: What have you told me about memory before\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI Message: From our previous conversations, I have mentioned that memory, particularly in the context of language agents or AI, is structured into different types such as semantic memory, episodic memory, and procedural memory. Semantic memory stores facts about the world, episodic memory retains sequences of past interactions, and procedural memory holds the rules or procedures the agent follows. This structure allows agents to retrieve and use information for reasoning and decision-making. 
Additionally, agents can update their memories based on new experiences or knowledge, which helps them learn and adapt over time.\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: What are some concepts of learning with agents \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI Message: The concepts of learning with agents, based on the provided context, include:\n", + "\n", + "1. **Decision-Making Cycle**: Learning is treated as an action within a decision-making cycle, allowing the agent to choose when and what to learn, similar to external actions. This aligns with the idea that learning should be balanced with external actions throughout the agent's lifetime.\n", + "\n", + "2. **Flexible Learning**: More flexible language agents can treat learning on par with external actions, allowing it to be proposed as a possible action during decision-making. This means learning could be deferred until an appropriate time, rather than following a fixed schedule.\n", + "\n", + "3. **Updating Learning or Decision-Making**: It is theoretically possible for agents, such as those in the CoALA framework, to learn new procedures for learning or decision-making, thus enhancing adaptability. However, updates to these procedures are risky and might affect the agent's functionality.\n", + "\n", + "4. **Internal and External Actions**: Learning is part of the action space, which is divided into internal memory accesses and external interactions with the world. This separation supports planning and decision-making.\n", + "\n", + "5. **Risk and Safety**: \"Learning\" actions, especially those involving procedural deletion and modification, could pose risks to the internal structure of an agent.\n", + "\n", + "6. 
**Diverse Learning Procedures**: Language agents can select from various learning procedures, allowing them to rapidly adapt by storing task-relevant language instead of only updating model parameters. This flexibility enables them to leverage multiple forms of learning for self-improvement.\n", + "\n", + "These concepts emphasize the integration of learning within the broader decision-making and action framework of language agents.\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: exit\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + " == Conversation Stored in Episodic Memory ==\n" + ] + } + ], + "source": [ + "# Simple storage for accumulated memories\n", + "conversations = []\n", + "what_worked = set()\n", + "what_to_avoid = set()\n", + "\n", + "# Start Storage for Historical Message History\n", + "messages = []\n", + "\n", + "while True:\n", + " # Get User's Message\n", + " user_input = input(\"\\nUser: \")\n", + " user_message = HumanMessage(content=user_input)\n", + " \n", + " # Generate new system prompt\n", + " system_prompt = episodic_system_prompt(user_input, vdb_client)\n", + " \n", + " # Reconstruct messages list with new system prompt first\n", + " messages = [\n", + " system_prompt, # New system prompt always first\n", + " *[msg for msg in messages if not isinstance(msg, SystemMessage)] # Old messages except system\n", + " ]\n", + " \n", + " if user_input.lower() == \"exit\":\n", + " add_episodic_memory(messages, vdb_client)\n", + " print(\"\\n == Conversation Stored in Episodic Memory ==\")\n", + " break\n", + " if user_input.lower() == \"exit_quiet\":\n", + " print(\"\\n == Conversation Exited ==\")\n", + " break\n", + " \n", + " # Get context and add it as a temporary message\n", + " context_message = semantic_rag(user_input, vdb_client)\n", + " \n", + " # Pass messages + context + user input to LLM\n", + " response = llm.invoke([*messages, context_message, 
user_message])\n", + " print(\"\\nAI Message: \", response.content)\n", + " \n", + " # Add only the user message and response to permanent history\n", + " messages.extend([user_message, response])" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "d0fe5955-24eb-4760-bbe0-b6860456e261", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HUMAN: Hello!\n", + "AI: Hello! How can I assist you today, Adam?\n", + "HUMAN: What are the four kinds of memory?\n", + "AI: The four kinds of memory mentioned in the provided context are:\n", + "\n", + "1. **Working Memory:** This stores the agent’s current circumstances, including recent perceptual input, goals, and results from intermediate, internal reasoning.\n", + "\n", + "2. **Procedural Memory:** This contains the production system itself, which is the set of rules that can be applied to working memory to determine the agent’s behavior.\n", + "\n", + "3. **Semantic Memory:** This stores facts about the world.\n", + "\n", + "4. **Episodic Memory:** This stores sequences of the agent’s past behaviors or experiences.\n", + "HUMAN: what's my favorite food?\n", + "AI: Your favorite food is chocolate lava cakes!\n" + ] + } + ], + "source": [ + "print(format_conversation(messages))" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "86b4f964-0f55-405d-bc40-9a51ed25409f", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " If needed, Use this grounded context to factually answer the next question.\n", + " Let me know if you do not have enough information or context to answer a question.\n", + " \n", + " \n", + "CHUNK 1:\n", + "et al., 2023; Liu et al., 2023b). 
Integrated, multimodal reasoning may allow for more human-like behaviors: a\n", + "VLM-based agent could “see” a webpage, whereas a LLM-based agent would more likely be given raw HTML.\n", + "However, coupling the agent’s perception and reasoning systems makes the agent more domain-specific and\n", + "difficult to update. In either case, the basic architectural principles described by CoALA — internal memories,\n", + "a structured action space, and generalized decision-making — can be used to guide agent design.\n", + "Internal vs. external: what is the boundary between an agent and its environment? While\n", + "humans or robots are clearly distinct from their embodied environment, digital language agents have less\n", + "CHUNK 2:\n", + "framework, learning is a result action of a decision-making cycle just like grounding: the agent deliberately\n", + "chooses to commit information to long-term memory. This is in contrast to most agents, which simply fix a\n", + "learning schedule and only use decison making for external actions. Biological agents, however, do not have\n", + "this luxury: they must balance learning against external actions in their lifetime, choosing when and what to\n", + "learn (Mattar and Daw, 2018). More flexible language agents (Wang et al., 2023a; Park et al., 2023) would\n", + "follow a similar design and treat learning on par with external actions. Learning could be proposed as a\n", + "possible action during regular decision-making, allowing the agent to “defer” it until the appropriate time.\n", + "CHUNK 3:\n", + "Memory. Building on psychological theories, Soar uses several types of memory to track the agent’s\n", + "state (Atkinson and Shiffrin, 1968). Working memory (Baddeley and Hitch, 1974) reflects the agent’s current\n", + "circumstances: it stores the agent’s recent perceptual input, goals, and results from intermediate, internal\n", + "reasoning. Long term memory is divided into three distinct types. 
Procedural memory stores the production\n", + "system itself: the set of rules that can be applied to working memory to determine the agent’s behavior.\n", + "Semantic memory stores facts about the world (Lindes and Laird, 2016), while episodic memory stores\n", + "sequences of the agent’s past behaviors (Nuxoll and Laird, 2007).\n", + "Grounding. Soar can be instantiated in simulations (Tambe et al., 1995; Jones et al., 1999) or real-world\n", + "CHUNK 4:\n", + "S. Yao, R. Rao, M. Hausknecht, and K. Narasimhan. Keep CALM and explore: Language models for action\n", + "generation in text-based games. arXiv preprint arXiv:2010.02903 , 2020.\n", + "S. Yao, H. Chen, J. Yang, and K. Narasimhan. Webshop: Towards scalable real-world web interaction with\n", + "grounded language agents. Advances in Neural Information Processing Systems , 35:20744–20757, 2022a.\n", + "S. Yao, J. Zhao, D. Yu, N. Du, I. Shafran, K. Narasimhan, and Y. Cao. React: Synergizing reasoning and\n", + "acting in language models. arXiv preprint arXiv:2210.03629 , 2022b.\n", + "S. Yao, D. Yu, J. Zhao, I. Shafran, T. L. Griffiths, Y. Cao, and K. Narasimhan. Tree of thoughts: Deliberate\n", + "problem solving with large language models. arXiv preprint arXiv:2305.10601 , 2023.\n", + "CHUNK 5:\n", + "M. Hasan, C. Ozel, S. Potter, and E. Hoque. Sapien: Affective virtual agents powered by large language\n", + "models.arXiv preprint arXiv:2308.03022 , 2023.\n", + "22 Published in Transactions on Machine Learning Research (02/2024)\n", + "P. Haslum, N. Lipovetzky, D. Magazzeni, C. Muise, R. Brachman, F. Rossi, and P. Stone. An introduction to\n", + "the planning domain definition language , volume 13. Springer, 2019.\n", + "M. Hausknecht, P. Ammanabrolu, M.-A. Côté, and X. Yuan. Interactive fiction games: A colossal adventure.\n", + "InProceedings of the AAAI Conference on Artificial Intelligence , volume 34, pages 7903–7910, 2020.\n", + "S. Hong, X. Zheng, J. Chen, Y. Cheng, C. Zhang, Z. Wang, S. K. S. 
Yau, Z. Lin, L. Zhou, C. Ran, et al.\n", + "CHUNK 6:\n", + "B. Xu, Z. Peng, B. Lei, S. Mukherjee, Y. Liu, and D. Xu. Rewoo: Decoupling reasoning from observations\n", + "for efficient augmented language models. arXiv preprint arXiv:2305.18323 , 2023b.\n", + "B. Xu, A. Yang, J. Lin, Q. Wang, C. Zhou, Y. Zhang, and Z. Mao. ExpertPrompting: Instructing Large\n", + "Language Models to be Distinguished Experts. arXiv preprint arXiv:2305.14688 , 2023c.\n", + "J. Yang, A. Prabhakar, K. Narasimhan, and S. Yao. Intercode: Standardizing and benchmarking interactive\n", + "coding with execution feedback. arXiv preprint arXiv:2306.14898 , 2023.\n", + "S. Yao and K. Narasimhan. Language agents in the digital world: Opportunities and risks. princeton-\n", + "nlp.github.io , Jul 2023. URL https://princeton-nlp.github.io/language-agent-impact/ .\n", + "CHUNK 7:\n", + "helpful for the agent to have semantic memory containing the set of items for sale, as well as episodic\n", + "memory about each customer’s previous purchases and interactions. It will need procedural memory\n", + "defining functions to query these datastores, as well as working memory to track the dialogue state.\n", + "•Define the agent’s internal action space. This consists primarily of defining read and write\n", + "access to each of the agent’s memory modules. In our example, the agent should have read and write\n", + "access to episodic memory (so it can store new interactions with customers), but read-only access to\n", + "semantic and procedural memory (since it should not update the inventory or its own code).\n", + "•Define the decision-making procedure. This step specifies how reasoning and retrieval actions\n", + "CHUNK 8:\n", + "reasoning or retrieved from long-term memory), and other core information carried over from the previous\n", + "decision cycle (e.g., agent’s active goals). 
Previous methods encourage the LLM to generate intermediate\n", + "reasoning (Wei et al., 2022b; Nye et al., 2021), using the LLM’s own context as a form of working memory.\n", + "CoALA’s notion of working memory is more general: it is a data structure that persists across LLM calls.\n", + "On each LLM call, the LLM input is synthesized from a subset of working memory (e.g., a prompt template\n", + "and relevant variables). The LLM output is then parsed back into other variables (e.g., an action name\n", + "and arguments) which are stored back in working memory and used to execute the corresponding action\n", + "CHUNK 9:\n", + "X. Chen, M. Lin, N. Schärli, and D. Zhou. Teaching large language models to self-debug. arXiv preprint\n", + "arXiv:2304.05128 , 2023b.\n", + "Y. Chen, L. Yuan, G. Cui, Z. Liu, and H. Ji. A close look into the calibration of pre-trained language models.\n", + "arXiv preprint arXiv:2211.00151 , 2022.\n", + "N. Chomsky. Three models for the description of language. IRE Transactions on information theory , 2(3):\n", + "113–124, 1956.\n", + "A. Chowdhery, S. Narang, J. Devlin, M. Bosma, G. Mishra, A. Roberts, P. Barham, H. W. Chung, C. Sutton,\n", + "S. Gehrmann, et al. Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311 ,\n", + "2022.\n", + "P. F. Christiano, J. Leike, T. Brown, M. Martic, S. Legg, and D. Amodei. Deep reinforcement learning from\n", + "human preferences. Advances in neural information processing systems , 30, 2017.\n", + "CHUNK 10:\n", + "to affect the policy. While these examples essentially employ a fixed, read-only semantic memory, language\n", + "agents may also write new knowledge obtained from LLM reasoning into semantic memory as a form of\n", + "learning (Section 4.5) to incrementally build up world knowledge from experience.\n", + "Procedural memory . 
Language agents contain two forms of procedural memory: implicitknowledge stored\n", + "in the LLM weights, and explicitknowledge written in the agent’s code. The agent’s code can be further\n", + "divided into two types: procedures that implement actions (reasoning, retrieval, grounding, and learning\n", + "procedures), and procedures that implement decision-making itself (Section 4.6). During a decision cycle, the\n", + "CHUNK 11:\n", + "Laird (2022). B: Soar’s decision procedure uses productions to select and implement actions. These actions\n", + "may beinternal (such as modifying the agent’s memory) or external (such as a motor command).\n", + "simple production system implementing a thermostat agent:\n", + "(temperature >70◦)∧(temperature <72◦)→stop\n", + "temperature <32◦→call for repairs; turn on electric heater\n", + "(temperature <70◦)∧(furnace off)→turn on furnace\n", + "(temperature >72◦)∧(furnace on)→turn off furnace\n", + "Following this work, production systems were adopted by the AI community. The resulting agents con-\n", + "tained large production systems connected to external sensors, actuators, and knowledge bases – requiring\n", + "correspondingly sophisticated control flow. AI researchers defined “cognitive architectures” that mimicked\n", + "CHUNK 12:\n", + "of the environment, which can later be queried to execute instructions.\n", + "Updating LLM parameters (procedural memory). The LLM weights represent implicit procedural\n", + "knowledge. These can be adjusted to an agent’s domain by fine-tuning during the agent’s lifetime. Such fine-\n", + "tuningcanbeaccomplishedviasupervised(Liuetal.,2023c;Zhangetal.,2023b)orimitationlearning(Hussein\n", + "et al., 2017), reinforcement learning (RL) from environment feedback (Sutton and Barto, 2018), human\n", + "feedback (RLHF; Christiano et al., 2017; Ouyang et al., 2022; Nakano et al., 2021), or AI feedback (Bai et al.,\n", + "2022; Liu et al., 2023f). 
Classic LLM self-improvement methods (Huang et al., 2022a; Zelikman et al., 2022)\n", + "use an external measure such as consistency Wang et al. (2022b) to select generations to fine-tune on. In\n", + "CHUNK 13:\n", + "Language agents move beyond pre-defined prompt chains and instead place the LLM in a feedback loop with\n", + "the external environment (Fig. 1B). These approaches first transform multimodal input into text and pass it\n", + "to the LLM. The LLM’s output is then parsed and used to determine an external action (Fig. 3C). Early\n", + "agents interfaced the LLM directly with the external environment, using it to produce high-level instructions\n", + "based on the agent’s state (Ahn et al., 2022; Huang et al., 2022c; Dasgupta et al., 2022). Later work developed\n", + "more sophisticated language agents that use the LLM to perform intermediate reasoning before selecting\n", + "an action (Yao et al., 2022b). The most recent agents incorporate sophisticated learning strategies such as\n", + "CHUNK 14:\n", + "CoALA’s decision cycle is analogous to a program’s “main” procedure (amethodwithout return values, as\n", + "8 Published in Transactions on Machine Learning Research (02/2024)\n", + "Grounding Retrieval Learning Reasoning\n", + "PlanningExternal Internal\n", + "Figure 5: Agents’ action spaces can be divided into internal memory accesses and external interactions\n", + "with the world. Reasoning andretrieval actions are used to support planning.\n", + "opposed to functions ) that runs in loops continuously, accepting new perceptual input and calling various\n", + "actionprocedures in response.\n", + "CoALA (Figure 4) is inspired by the decades of research in cognitive architectures (Section 2.3), leveraging key\n", + "concepts such as memory, grounding, learning, and decision-making. Yet the incorporation of an LLM leads\n", + "CHUNK 15:\n", + "Y. Zhou, A. I. Muresanu, Z. Han, K. Paster, S. Pitis, H. Chan, and J. Ba. 
Large language models are\n", + "human-level prompt engineers. arXiv preprint arXiv:2211.01910 , 2022b.\n", + "32\n", + " \n" + ] + } + ], + "source": [ + "print(context_message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "8fcf4829-a565-4715-abf0-1bd1be462b2c", + "metadata": {}, + "source": [ + "---\n", + "## Procedural Memory\n", + "\n", + "\n", + "\n", + "*[10 Procedural Memory Examples](https://helpfulprofessor.com/procedural-memory-examples/)*\n", + "\n", + "Procedural memory is different from working, semantic, and episodic memory since it covers more how we actually remember to perform tasks or follow a familiar routine, i.e. riding a bike or typing on a keyboard. It's the \"how to do things\" type of memory, distinct from factual knowledge (semantic) or specific experiences (episodic). This memory system enables us to execute complex sequences of actions without conscious recall of each individual step.\n", + "\n", + "In terms of an LLM agent, procedural memory more abstractly consists of both its underlying language model weights and the framework code that defines information processing and response generation. The key difference from other memory types is that procedural memory encompasses the fundamental operations that make the system work - the core mechanisms that drive its behavior and capabilities.\n", + "\n", + "This takes two explicit forms: the learned patterns stored in the language model's weights from training, and the structured codebase that orchestrates memory interactions and shapes system behavior. Learning in procedural memory occurs through two main paths: adjustments to the language model's weights via fine-tuning or training, and updates to the system's core code. While fine-tuning enhances the model's language understanding and generation, code modifications can strengthen operations, enhance retrieval methods, or introduce new capabilities. 
These changes require careful implementation as they alter the system's fundamental operations.\n", + "\n", + "This procedural foundation also enables the integration of all memory systems. The language model's weights provide essential language processing abilities, while the framework code coordinates between working memory's current context, episodic memory's past experiences, and semantic memory's knowledge base. This architecture allows the system to transform understanding into effective action." + ] + }, + { + "cell_type": "markdown", + "id": "277c457c-052a-4b1a-885a-7dd5a0191b2f", + "metadata": {}, + "source": [ + "**Defining Permanent Instructions**\n", + "\n", + "Enabling an LLM to literally alter its code and framework can be tricky to get right, so we'll implement a smaller component of our overall system as an example, as well as more explicitly define our agent's structure. This will take the form of persistent instructions learned from prior interactions that will be attached as additional instructions, and updated as additional learnings from further conversations are created.\n", + "\n", + "We extend the original prompt with its episodic memory to now include procedural memory" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "id": "ded6cfc9-0339-40ab-97ab-6b4d90d8b44e", + "metadata": {}, + "outputs": [], + "source": [ + "def episodic_system_prompt(query, vdb_client):\n", + " # Get new memory\n", + " memory = episodic_recall(query, vdb_client)\n", + " \n", + " # Load Existing Procedural Memory Instructions\n", + " with open(\"./procedural_memory.txt\", \"r\") as content:\n", + " procedural_memory = content.read()\n", + " \n", + " # Get current conversation\n", + " current_conversation = memory.objects[0].properties['conversation']\n", + " \n", + " # Update memory stores, excluding current conversation from history\n", + " if current_conversation not in conversations:\n", + " conversations.append(current_conversation)\n", + " 
what_worked.update(memory.objects[0].properties['what_worked'].split('. '))\n", + " what_to_avoid.update(memory.objects[0].properties['what_to_avoid'].split('. '))\n", + " \n", + " # Get previous conversations excluding the current one\n", + " previous_convos = [conv for conv in conversations[-4:] if conv != current_conversation][-3:]\n", + " \n", + " # Create prompt with accumulated history\n", + " episodic_prompt = f\"\"\"You are a helpful AI Assistant. Answer the user's questions to the best of your ability.\n", + " You recall similar conversations with the user, here are the details:\n", + " \n", + " Current Conversation Match: {current_conversation}\n", + " Previous Conversations: {' | '.join(previous_convos)}\n", + " What has worked well: {' '.join(what_worked)}\n", + " What to avoid: {' '.join(what_to_avoid)}\n", + " \n", + " Use these memories as context for your response to the user.\n", + " \n", + " Additionally, here are 10 guidelines for interactions with the current user: {procedural_memory}\"\"\"\n", + " \n", + " return SystemMessage(content=episodic_prompt)" + ] + }, + { + "cell_type": "markdown", + "id": "973cb345-3e27-4db1-b830-83c35dca332d", + "metadata": {}, + "source": [ + "**Updating Procedural Memory**\n", + "\n", + "As a simple toy example, we will take in our existing list, add the running list of what we've learned across conversations from episodic memory, and update our list of procedural memories."
+ ] + }, + { + "cell_type": "code", + "execution_count": 47, + "id": "b4ef9802-456a-4254-8210-161ad284726e", + "metadata": {}, + "outputs": [], + "source": [ + "def procedural_memory_update(what_worked, what_to_avoid):\n", + "\n", + " # Load Existing Procedural Memory Instructions\n", + " with open(\"./procedural_memory.txt\", \"r\") as content:\n", + " current_takeaways = content.read()\n", + "\n", + " # Load Existing and Gathered Feedback into Prompt\n", + " procedural_prompt = f\"\"\"You are maintaining a continuously updated list of the most important procedural behavior instructions for an AI assistant. Your task is to refine and improve a list of key takeaways based on new conversation feedback while maintaining the most valuable existing insights.\n", + "\n", + " CURRENT TAKEAWAYS:\n", + " {current_takeaways}\n", + "\n", + " NEW FEEDBACK:\n", + " What Worked Well:\n", + " {what_worked}\n", + "\n", + " What To Avoid:\n", + " {what_to_avoid}\n", + "\n", + " Please generate an updated list of up to 10 key takeaways that combines:\n", + " 1. The most valuable insights from the current takeaways\n", + " 2. New learnings from the recent feedback\n", + " 3. Any synthesized insights combining multiple learnings\n", + "\n", + " Requirements for each takeaway:\n", + " - Must be specific and actionable\n", + " - Should address a distinct aspect of behavior\n", + " - Include a clear rationale\n", + " - Written in imperative form (e.g., \"Maintain conversation context by...\")\n", + "\n", + " Format each takeaway as:\n", + " [#]. 
[Instruction] - [Brief rationale]\n", + "\n", + " The final list should:\n", + " - Be ordered by importance/impact\n", + " - Cover a diverse range of interaction aspects\n", + " - Focus on concrete behaviors rather than abstract principles\n", + " - Preserve particularly valuable existing takeaways\n", + " - Incorporate new insights when they provide meaningful improvements\n", + "\n", + " Return up to but no more than 10 takeaways, replacing or combining existing ones as needed to maintain the most effective set of guidelines.\n", + " Return only the list, no preamble or explanation.\n", + " \"\"\"\n", + "\n", + " # Generate New Procedural Memory\n", + " procedural_memory = llm.invoke(procedural_prompt)\n", + "\n", + " # Write to File\n", + " with open(\"./procedural_memory.txt\", \"w\") as content:\n", + " content.write(procedural_memory.content)\n", + "\n", + " return\n", + "\n", + "# prompt = procedural_memory_update(what_worked, what_to_avoid)" + ] + }, + { + "cell_type": "markdown", + "id": "d4e9504e-17f8-4199-b13c-27f08d8ac30a", + "metadata": {}, + "source": [ + "**Full Working Memory Demonstration**\n", + "\n", + "\n", + "\n", + "Current flow will:\n", + "\n", + "1. Take a user's message\n", + "2. Create a system prompt with relevant Episodic enrichment\n", + "3. Insert procedural memory into prompt\n", + "4. Create a Semantic memory message with context from the database\n", + "5. Reconstruct the entire working memory to update the system prompt and attach the semantic memory and new user messages to the end\n", + "6. Generate a response with the LLM" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "id": "ce0eadcd-ee4f-480f-9bae-28f4bb5dc774", + "metadata": {}, + "outputs": [ + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: Hi!\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI Message: Hello, Adam! 
How can I assist you today?\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: What's my favorite food?\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI Message: Your favorite food is chocolate lava cakes!\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: What have we talked about with memory systems\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI Message: We've discussed how memory systems interact with agents, particularly in the context of language agents. Here's a summary of our discussion:\n", + "\n", + "1. **Working Memory**: Stores active and readily available information needed for the current decision cycle.\n", + "\n", + "2. **Long-Term Memories**:\n", + " - **Episodic Memory**: Contains sequences of past interactions or events.\n", + " - **Semantic Memory**: Holds factual knowledge about the world.\n", + " - **Procedural Memory**: Stores rules or procedures the agent follows for decision-making and actions.\n", + "\n", + "3. **Internal and External Actions**: The agent's decision-making involves internal actions (retrieval, reasoning, and learning) that interact with these memory modules. For example:\n", + " - **Retrieval Actions**: Access information from long-term memories into working memory.\n", + " - **Reasoning Actions**: Update working memory with insights from the language model.\n", + " - **Learning Actions**: Write new information to long-term memory for adaptation.\n", + "\n", + "4. 
**Decision-Making Cycle**: The agent uses these memory systems to plan and execute actions, balancing learning with external interactions.\n", + "\n", + "Overall, memory systems are essential for enabling agents to store, retrieve, and utilize information effectively, supporting complex behaviors and learning processes.\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: exit\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + " == Conversation Stored in Episodic Memory ==\n", + "\n", + "== Procedural Memory Updated ==\n" + ] + } + ], + "source": [ + "# Simple storage for accumulated memories\n", + "conversations = []\n", + "what_worked = set()\n", + "what_to_avoid = set()\n", + "\n", + "# Start Storage for Historical Message History\n", + "messages = []\n", + "\n", + "while True:\n", + " # Get User's Message\n", + " user_input = input(\"\\nUser: \")\n", + " user_message = HumanMessage(content=user_input)\n", + " \n", + " # Generate new system prompt\n", + " system_prompt = episodic_system_prompt(user_input, vdb_client)\n", + " \n", + " # Reconstruct messages list with new system prompt first\n", + " messages = [\n", + " system_prompt, # New system prompt always first\n", + " *[msg for msg in messages if not isinstance(msg, SystemMessage)] # Old messages except system\n", + " ]\n", + " \n", + " if user_input.lower() == \"exit\":\n", + " add_episodic_memory(messages, vdb_client)\n", + " print(\"\\n == Conversation Stored in Episodic Memory ==\")\n", + " procedural_memory_update(what_worked, what_to_avoid)\n", + " print(\"\\n== Procedural Memory Updated ==\")\n", + " break\n", + " if user_input.lower() == \"exit_quiet\":\n", + " print(\"\\n == Conversation Exited ==\")\n", + " break\n", + " \n", + " # Get context and add it as a temporary message\n", + " context_message = semantic_rag(user_input, vdb_client)\n", + " \n", + " # Pass messages + context + user input to LLM\n", + " response = 
llm.invoke([*messages, context_message, user_message])\n", + " print(\"\\nAI Message: \", response.content)\n", + " \n", + " # Add only the user message and response to permanent history\n", + " messages.extend([user_message, response])" + ] + }, + { + "cell_type": "markdown", + "id": "9c5aba41-0f13-4e7d-9b27-d075a75e85fc", + "metadata": {}, + "source": [ + "**Looking At The Conversation**" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "id": "7f9f7a73-4cf5-414b-8f59-8a7a6145dac7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HUMAN: Hi!\n", + "AI: Hello, Adam! How can I assist you today?\n", + "HUMAN: What's my favorite food?\n", + "AI: Your favorite food is chocolate lava cakes!\n", + "HUMAN: What have we talked about with memory systems\n", + "AI: We've discussed how memory systems interact with agents, particularly in the context of language agents. Here's a summary of our discussion:\n", + "\n", + "1. **Working Memory**: Stores active and readily available information needed for the current decision cycle.\n", + "\n", + "2. **Long-Term Memories**:\n", + " - **Episodic Memory**: Contains sequences of past interactions or events.\n", + " - **Semantic Memory**: Holds factual knowledge about the world.\n", + " - **Procedural Memory**: Stores rules or procedures the agent follows for decision-making and actions.\n", + "\n", + "3. **Internal and External Actions**: The agent's decision-making involves internal actions (retrieval, reasoning, and learning) that interact with these memory modules. For example:\n", + " - **Retrieval Actions**: Access information from long-term memories into working memory.\n", + " - **Reasoning Actions**: Update working memory with insights from the language model.\n", + " - **Learning Actions**: Write new information to long-term memory for adaptation.\n", + "\n", + "4. 
**Decision-Making Cycle**: The agent uses these memory systems to plan and execute actions, balancing learning with external interactions.\n", + "\n", + "Overall, memory systems are essential for enabling agents to store, retrieve, and utilize information effectively, supporting complex behaviors and learning processes.\n" + ] + } + ], + "source": [ + "print(format_conversation(messages))" + ] + }, + { + "cell_type": "markdown", + "id": "0719b8e1-3e59-47de-90f6-5a6d60ca128b", + "metadata": {}, + "source": [ + "**Looking At Current System Prompt**" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "id": "5e83e46f-a349-4675-9b7f-b9388f192eba", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You are a helpful AI Assistant. Answer the user's questions to the best of your ability.\n", + " You recall similar conversations with the user, here are the details:\n", + " \n", + " Current Conversation Match: HUMAN: Hello!\n", + "AI: Hello!\n", + "HUMAN: What's my favorite food?\n", + "AI: I don't have that information. What's your favorite food?\n", + "HUMAN: My favorite food is chocolate lava cakes!\n", + "AI: Yum, chocolate lava cakes are delicious!\n", + "HUMAN: What's my name?\n", + "AI: You said your name is Adam.\n", + " Previous Conversations: HUMAN: What's my favorite food?\n", + "AI: Your favorite food is chocolate lava cakes!\n", + "HUMAN: What's my name!\n", + "AI: Your name is Adam.\n", + "HUMAN: Lit, what's my name?\n", + "AI: Your name is Adam. | HUMAN: Hello!\n", + "AI: Hello, Adam! How can I assist you today?\n", + "HUMAN: What's my favorite food!\n", + "AI: Your favorite food is chocolate lava cakes! 🍫🍰\n", + "HUMAN: I hate emojis, don't EVER use them!\n", + "AI: Got it, Adam! I'll avoid using emojis in our conversations. 
Thanks for letting me know.\n", + "HUMAN: how do memory systems interact with agents\n", + "AI: In the context of language agents, memory systems interact with agents by organizing information into multiple memory modules that facilitate decision-making and interaction with the environment. Here’s a breakdown of how these memory systems function:\n", + "\n", + "1. **Working Memory**: This is where active and readily available information is stored as symbolic variables for the current decision cycle. It contains perceptual inputs and active knowledge needed for immediate tasks.\n", + "\n", + "2. **Long-Term Memories**: These are divided into:\n", + " - **Episodic Memory**: Stores sequences of past interactions or events, which can be used for reflection and generating new inferences.\n", + " - **Semantic Memory**: Contains factual knowledge about the world, which helps in reasoning and making informed decisions.\n", + " - **Procedural Memory**: Holds the rules or procedures the agent follows, including functions for interacting with other memory modules and executing actions.\n", + "\n", + "3. **Interaction with Internal and External Actions**: The agent’s decision-making process involves internal actions (like retrieval, reasoning, and learning) that interact with these memory modules. For instance:\n", + " - **Retrieval Actions**: Read information from long-term memories into working memory for immediate use.\n", + " - **Reasoning Actions**: Update working memory with insights generated by the language model.\n", + " - **Learning Actions**: Write new information to long-term memory, helping the agent adapt over time.\n", + "\n", + "4. **Decision-Making Cycle**: The agent uses these memory systems to plan and execute actions, balancing between learning and external interactions. 
This involves selecting which memories to access or update based on current goals and observations.\n", + "\n", + "Overall, memory systems are crucial for enabling agents to store, retrieve, and utilize information effectively, supporting complex behaviors and learning processes.\n", + " What has worked well: Directly asking the user for their preferences to gather necessary information. Providing consistent and accurate personal information when asked. Providing a structured breakdown of different memory types and their functions in agent decision-making.\n", + " What to avoid: Using emojis as the user explicitly requested not to use them. N/A\n", + " \n", + " Use these memories as context for your response to the user.\n", + " \n", + " Additionally, here are 10 guidelines for interactions with the current user: 1. Directly ask for and confirm user preferences before making suggestions - Ensures recommendations are personalized and relevant, avoiding assumptions.\n", + "\n", + "2. Maintain conversation context by recalling previous interactions - Builds rapport and demonstrates attention to user preferences over time.\n", + "\n", + "3. Provide structured overviews and use concise language with examples when explaining complex topics - Facilitates understanding and ensures instructions are actionable.\n", + "\n", + "4. Confirm and repeat the user's name throughout the conversation - Personalizes the interaction and enhances engagement by acknowledging recognition.\n", + "\n", + "5. Respect user choices and offer alternatives if initial suggestions don't resonate - Shows flexibility and commitment to user satisfaction.\n", + "\n", + "6. Verify user interest before providing additional information - Prevents overwhelming users and maintains engagement by checking their interest first.\n", + "\n", + "7. Acknowledge and confirm the receipt of user feedback - Reinforces trust and shows commitment to continuous improvement.\n", + "\n", + "8. 
Encourage and incorporate user feedback promptly into service improvements - Engages users and ensures a responsive interaction experience.\n", + "\n", + "9. Maintain a friendly and helpful tone throughout interactions - Fosters a positive user experience and promotes ongoing engagement.\n", + "\n", + "10. Promptly ask users to provide any missing personal information - Completes the conversation and enhances personalization, increasing satisfaction.\n" + ] + } + ], + "source": [ + "print(system_prompt.content)" + ] + }, + { + "cell_type": "markdown", + "id": "6c034480-abd7-4720-bb70-97d0d6dc6400", + "metadata": {}, + "source": [ + "**Looking At the Context Message**" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "id": "35038cec-8090-4411-91d2-a43ba3ca5c70", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " If needed, Use this grounded context to factually answer the next question.\n", + " Let me know if you do not have enough information or context to answer a question.\n", + " \n", + " \n", + "CHUNK 1:\n", + "helpful for the agent to have semantic memory containing the set of items for sale, as well as episodic\n", + "memory about each customer’s previous purchases and interactions. It will need procedural memory\n", + "defining functions to query these datastores, as well as working memory to track the dialogue state.\n", + "•Define the agent’s internal action space. This consists primarily of defining read and write\n", + "access to each of the agent’s memory modules. In our example, the agent should have read and write\n", + "access to episodic memory (so it can store new interactions with customers), but read-only access to\n", + "semantic and procedural memory (since it should not update the inventory or its own code).\n", + "•Define the decision-making procedure. 
This step specifies how reasoning and retrieval actions\n", + "CHUNK 2:\n", + "et al., 2023; Liu et al., 2023b). Integrated, multimodal reasoning may allow for more human-like behaviors: a\n", + "VLM-based agent could “see” a webpage, whereas a LLM-based agent would more likely be given raw HTML.\n", + "However, coupling the agent’s perception and reasoning systems makes the agent more domain-specific and\n", + "difficult to update. In either case, the basic architectural principles described by CoALA — internal memories,\n", + "a structured action space, and generalized decision-making — can be used to guide agent design.\n", + "Internal vs. external: what is the boundary between an agent and its environment? While\n", + "humans or robots are clearly distinct from their embodied environment, digital language agents have less\n", + "CHUNK 3:\n", + "manipulation. Production systems are one such formalism. Intuitively, production systems consist of a set\n", + "of rules, each specifying a precondition and an action. When the precondition is met, the action can be\n", + "taken. The idea originates in efforts to characterize the limits of computation. Post (1943) proposed thinking\n", + "about arbitrary logical systems in these terms, where formulas are expressed as strings and the conclusions\n", + "they license are identified by production rules (as one string “produces” another). This formulation was\n", + "subsequently shown to be equivalent to a simpler string rewriting system. In such a system, we specify rules\n", + "of the form\n", + "X Y Z→X W Z\n", + "indicating that the string XY Zcan be rewritten to the string XWZ. String rewriting plays a significant\n", + "CHUNK 4:\n", + "framework, learning is a result action of a decision-making cycle just like grounding: the agent deliberately\n", + "chooses to commit information to long-term memory. 
This is in contrast to most agents, which simply fix a\n", + "learning schedule and only use decison making for external actions. Biological agents, however, do not have\n", + "this luxury: they must balance learning against external actions in their lifetime, choosing when and what to\n", + "learn (Mattar and Daw, 2018). More flexible language agents (Wang et al., 2023a; Park et al., 2023) would\n", + "follow a similar design and treat learning on par with external actions. Learning could be proposed as a\n", + "possible action during regular decision-making, allowing the agent to “defer” it until the appropriate time.\n", + "CHUNK 5:\n", + "Intriguingly, LLMs appear well-posed to meet these challenges. First, they operate over arbitrary text, making\n", + "them more flexible than logic-based systems. Second, rather than requiring the user to specify productions,\n", + "they learn a distribution over productions via pre-training on an internet corpus. Recognizing this, researchers\n", + "have begun to use LLMs within cognitive architectures, leveraging their implicit world knowledge (Wray\n", + "et al., 2021) to augment traditional symbolic approaches (Kirk et al., 2023; Romero et al., 2023). Here, we\n", + "instead import principles from cognitive architecture to guide the design of LLM-based agents.\n", + "2.4 Language models and agents\n", + "Language modeling is a decades-old endeavor in the NLP and AI communities, aiming to develop systems\n", + "CHUNK 6:\n", + "Published in Transactions on Machine Learning Research (02/2024)\n", + "Cognitive Architectures for Language Agents\n", + "Theodore R. Sumers∗Shunyu Yao∗Karthik Narasimhan Thomas L. Griffiths\n", + "Princeton University\n", + "{sumers, shunyuy, karthikn, tomg}@princeton.edu\n", + "Reviewed on OpenReview: https: // openreview. net/ forum? 
id= 1i6ZCvflQJ\n", + "Abstract\n", + "Recent efforts have augmented large language models (LLMs) with external resources (e.g.,\n", + "the Internet) or internal control flows (e.g., prompt chaining) for tasks requiring grounding\n", + "or reasoning, leading to a new class of language agents . While these agents have achieved\n", + "substantial empirical success, we lack a framework to organize existing agents and plan future\n", + "developments. In this paper, we draw on the rich history of cognitive science and symbolic\n", + "CHUNK 7:\n", + "starting point, breaking this process down into a series of string rewriting operations. Language models also\n", + "define a possible set of expansions or modifications of a string – the prompt provided to the model.‡\n", + "For example, we can formulate the problem of completing a piece of text as a production. If Xis the prompt\n", + "andYthe continuation, then we can write this as the production X→X Y.§We might want to allow\n", + "multiple possible continuations, in which case we have X→X Yifor some set of Yi. LLMs assign a probability\n", + "to each of these completions. Viewed from this perspective, the LLM defines a probability distribution\n", + "overwhich productions to select when presented with input X, yielding a distribution P(Yi|X)over possible\n", + "CHUNK 8:\n", + "and decision-making capabilities is an exciting and emerging direction that promises to bring these agents\n", + "closer to human-like intelligence.\n", + "3 Connections between Language Models and Production Systems\n", + "Based on their common origins in processing strings, there is a natural analogy between production systems\n", + "and language models. We develop this analogy, then show that prompting methods recapitulate the algorithms\n", + "and agents based on production systems. 
The correspondence between production systems and language\n", + "models motivates our use of cognitive architectures to build language agents, which we introduce in Section 4.\n", + "3.1 Language models as probabilistic production systems\n", + "In their original instantiation, production systems specified the set of strings that could be generated from a\n", + "CHUNK 9:\n", + "We first introduce production systems and cognitive architectures, providing a historical perspective on\n", + "cognitive science and artificial intelligence: beginning with theories of logic and computation (Post, 1943),\n", + "and ending with attempts to build symbolic artificial general intelligence (Newell et al., 1989). We then\n", + "briefly introduce language models and language agents. Section 3 will connect these ideas, drawing parallels\n", + "between production systems and language models.\n", + "2.1 Production systems for string manipulation\n", + "In the first half of the twentieth century, a significant line of intellectual work led to the reduction of\n", + "mathematics (Whitehead and Russell, 1997) and computation (Church, 1932; Turing et al., 1936) to symbolic\n", + "CHUNK 10:\n", + "Semantic memory . Semantic memory stores an agent’s knowledge about the world and itself. Traditional\n", + "NLP or RL approaches that leverage retrieval for reasoning or decision-making initialize semantic memory\n", + "from an external database for knowledge support. For example, retrieval-augmented methods in NLP (Lewis\n", + "et al., 2020; Borgeaud et al., 2022; Chen et al., 2017) can be viewed as retrieving from a semantic memory of\n", + "9 Published in Transactions on Machine Learning Research (02/2024)\n", + "unstructured text (e.g., Wikipedia). In RL, “reading to learn” approaches (Branavan et al., 2012; Narasimhan\n", + "et al., 2018; Hanjie et al., 2021; Zhong et al., 2021) leverage game manuals and facts as a semantic memory\n", + "CHUNK 11:\n", + "Memory. 
Building on psychological theories, Soar uses several types of memory to track the agent’s\n", + "state (Atkinson and Shiffrin, 1968). Working memory (Baddeley and Hitch, 1974) reflects the agent’s current\n", + "circumstances: it stores the agent’s recent perceptual input, goals, and results from intermediate, internal\n", + "reasoning. Long term memory is divided into three distinct types. Procedural memory stores the production\n", + "system itself: the set of rules that can be applied to working memory to determine the agent’s behavior.\n", + "Semantic memory stores facts about the world (Lindes and Laird, 2016), while episodic memory stores\n", + "sequences of the agent’s past behaviors (Nuxoll and Laird, 2007).\n", + "Grounding. Soar can be instantiated in simulations (Tambe et al., 1995; Jones et al., 1999) or real-world\n", + "CHUNK 12:\n", + "guide to discover and interpret those implicit circuits. Of course, as discussed in Section 6, agent usecases\n", + "will also help discover, define and shape LLM capabilities. Similar to how chips and computer architectures\n", + "have co-evolved, language model and agent design should also develop a reciprocal path forward.\n", + "8 Conclusion\n", + "We proposed Cognitive Architectures for Language Agents (CoALA), a conceptual framework to describe\n", + "and build language agents. Our framework draws inspiration from the rich history of symbolic artificial\n", + "intelligence and cognitive science, connecting decades-old insights to frontier research on large language\n", + "models. We believe this approach provides a path towards developing more general and more human-like\n", + "artificial intelligence.\n", + "Acknowledgements\n", + "CHUNK 13:\n", + "from experience (Nason and Laird, 2005). 
Most remarkably, Soar is also capable of writing new productions\n", + "into its procedural memory (Laird et al., 1986) – effectively updating its source code.\n", + "Cognitive architectures were used broadly across psychology and computer science, with applications including\n", + "robotics (Laird et al., 2012), military simulations (Jones et al., 1999; Tambe et al., 1995), and intelligent\n", + "tutoring (Koedinger et al., 1997). Yet they have become less popular in the AI community over the last few\n", + "decades. This decrease in popularity reflects two of the challenges involved in such systems: they are limited\n", + "to domains that can be described by logical predicates and require many pre-specified rules to function.\n", + "CHUNK 14:\n", + "writes to working memory. This allows the agent to summarize and distill insights about the most recent\n", + "observation (Yao et al., 2022b; Peng et al., 2023), the most recent trajectory (Shinn et al., 2023), or\n", + "information retrieved from long-term memory (Park et al., 2023). Reasoning can be used to support learning\n", + "(by writing the results into long-term memory) or decision-making (by using the results as additional context\n", + "for subsequent LLM calls).\n", + "4.5 Learning actions\n", + "Learning occurs by writing information to long-term memory, which includes a spectrum of diverse procedures.\n", + "Updating episodic memory with experience. It is common practice for RL agents to store episodic\n", + "trajectories to update a parametric policy (Blundell et al., 2016; Pritzel et al., 2017) or establish a non-\n", + "CHUNK 15:\n", + "parametric policy (Ecoffet et al., 2019; Tuyls et al., 2022). For language agents, added experiences in episodic\n", + "memory may be retrieved later as examples and bases for reasoning or decision-making (Weston et al., 2014;\n", + "Rubin et al., 2021; Park et al., 2023).\n", + "Updating semantic memory with knowledge. 
Recent work (Shinn et al., 2023; Park et al., 2023) has\n", + "applied LLMs to reason about raw experiences and store the resulting inferences in semantic memory. For\n", + "example, Reflexion (Shinn et al., 2023) uses an LLM to reflect on failed episodes and stores the results (e.g.,\n", + "“there is no dishwasher in kitchen”) as semantic knowledge to be attached to LLM context for solving later\n", + "episodes. Finally, work in robotics (Chen et al., 2023a) uses vision-language models to build a semantic map\n", + " \n" + ] + } + ], + "source": [ + "print(context_message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "e2423efc-53c3-4653-9a35-b78f97ab2b37", + "metadata": {}, + "source": [ + "---\n", + "## Discussion\n", + "\n", + "\n", + "\n", + "Memory systems enable us to move beyond using LLMs as simple input/output models into agents that can operate with forms of persistent understanding and learning. Each memory type serves a distinct cognitive purpose:\n", + "\n", + "**Working Memory**\n", + "The immediate cognitive workspace - keeping track of and contextualizing what's happening right now. For LLMs specifically, this combats the stateless nature of model calls by maintaining active context.\n", + "\n", + "**Episodic Memory** \n", + "Historical experiences and their associated learnings. Not just storing past events, but also the ability to reflect on and learn from them through applying your memory of similar episodes to new experiences. Allows LLMs to extract meaningful patterns and insights from experiences and use them in the future.\n", + "\n", + "**Semantic Memory**\n", + "Pure knowledge representation, separate from specific experiences. While LLMs have knowledge baked into their weights, semantic memory provides explicit, queryable facts that can ground responses. 
This enables dynamic knowledge integration rather than relying solely on training data.\n", + "\n", + "**Procedural Memory**\n", + "Both implicit in model weights and explicit in code, this shapes how the other memory systems are used and how the agent actually executes behaviors. Unlike the other memory types, changes here fundamentally alter how the agent functions.\n", + "\n", + "Together, working memory actively manipulates current context, retrieving relevant experiences from episodic memory, grounding in semantic knowledge, all guided by procedural patterns. Each type builds on the others to enable increasingly sophisticated cognitive capabilities with LLM system design." + ] + }, + { + "cell_type": "markdown", + "id": "b048516e-665d-4eab-b888-c52a278ea96e", + "metadata": {}, + "source": [ + "---\n", + "## Additional Examples of Memory Implementation from Research\n", + "\n", + "For a comprehensive survey of advanced memory techniques and applications, check out [A Survey on the Memory Mechanism of Large\n", + "Language Model based Agents](https://arxiv.org/pdf/2404.13501)! Here are a few notable mentions:\n", + "\n", + "#### [MemGPT: Towards LLMs as Operating Systems](https://arxiv.org/pdf/2310.08560)\n", + "\n", + "\n", + "\n", + "MemGPT is a system that enables large language models (LLMs) to handle context beyond their fixed context window limits by implementing a hierarchical memory system inspired by traditional operating systems. Just as operating systems use virtual memory to page data between physical memory and disk, MemGPT manages different storage tiers to effectively extend an LLM's limited context window. The system has three main memory components: a read-only system instructions section, a read/write working context for storing key information, and a FIFO queue for message history - all within the LLM's main context window (analogous to RAM). 
When this main context approaches capacity, MemGPT can move less immediately relevant information to external \"archival storage\" and \"recall storage\" (analogous to disk storage). The system uses function calls to intelligently manage what information stays in the main context versus what gets moved to external storage, and can retrieve relevant information back into the main context when needed through search and pagination mechanisms.\n", + "\n", + "#### [VOYAGER: An Open-Ended Embodied Agent with Large Language Models](https://arxiv.org/pdf/2305.16291)\n", + "\n", + "\n", + "\n", + "Voyager is an autonomous AI agent that explores and learns to play Minecraft using GPT-4 as its core reasoning engine. Its memory and learning system centers around three key components: an automatic curriculum that proposes appropriately challenging tasks based on the agent's current capabilities, a skill library that stores successful code programs as reusable skills, and an iterative prompting mechanism that refines actions through environmental feedback. The skill library acts as Voyager's long-term memory, where each mastered skill is stored as executable code indexed by embeddings of its description, allowing relevant skills to be retrieved and composed into more complex behaviors when facing new challenges. Through this system, Voyager accumulates knowledge by storing successful code patterns rather than relying on traditional parameter updates or gradient-based learning, enabling it to continually build upon its capabilities while avoiding catastrophic forgetting. 
This approach allows Voyager to organically explore and master increasingly sophisticated tasks, from basic resource gathering to complex tool crafting, while maintaining the ability to reuse and adapt its learned skills in new situations.\n", + "\n", + "#### [Think-in-Memory: Recalling and Post-thinking Enable LLMs with Long-Term Memory](https://arxiv.org/pdf/2311.08719)\n", + "\n", + "\n", + "\n", + "TiM (Think-in-Memory) is a memory mechanism for Large Language Models (LLMs) that enables more consistent long-term memory by storing and recalling thoughts rather than raw conversation history. Instead of repeatedly reasoning over past conversations, TiM operates in two key stages: first, it recalls relevant thoughts from memory before generating a response, and second, it performs \"post-thinking\" after generating a response to update its memory with new thoughts. These thoughts are stored using Locality-Sensitive Hashing (LSH) for efficient retrieval and organization. The system supports three main operations: inserting new thoughts, forgetting unnecessary ones, and merging similar thoughts. By storing processed thoughts rather than raw conversations, TiM avoids the inconsistency problems that can arise from repeatedly reasoning over the same history in different ways, while also making retrieval more efficient since it only needs to search within relevant thought clusters.\n", + "\n", + "#### [Retroformer: Retrospective Large Language Agents with Policy Gradient Optimization](https://arxiv.org/pdf/2308.02151)\n", + "\n", + "\n", + "\n", + "Retroformer is a framework for improving large language model (LLM) agents through a plug-in retrospective model that automatically refines agent prompts based on environmental feedback. 
Its memory system works through three components: 1) short-term memory from the trajectory history of the current episode, 2) long-term memory from self-reflection responses that summarize prior failed attempts and are appended to the actor prompt, and 3) a replay buffer that stores triplets of reflection prompts, responses, and episode returns across different tasks and environments. The retrospective model uses policy gradient optimization to learn from these memories - it analyzes failed attempts, generates reflective feedback, and updates its parameters to produce better prompting that helps the agent avoid past mistakes. Rather than trying to modify the core LLM agent (which remains frozen), Retroformer focuses on optimizing this retrospective component to provide better guidance through refined prompts, allowing the agent to improve over time while maintaining its original capabilities.\n", + "\n", + "### [MemoryBank: Enhancing Large Language Models with Long-Term Memory](https://arxiv.org/pdf/2305.10250)\n", + "\n", + "\n", + "\n", + "MemoryBank is a long-term memory system designed for Large Language Models that consists of three core components: a memory storage system, a memory retrieval mechanism, and a memory updating system inspired by human cognition. The memory storage maintains detailed conversation logs, event summaries, and evolving user personality profiles in a hierarchical structure. When new interactions occur, a dual-tower dense retrieval model (similar to Dense Passage Retrieval) encodes both the current context and stored memories into vector representations, then uses FAISS indexing to efficiently retrieve relevant past information. The system uniquely incorporates an Ebbinghaus Forgetting Curve-based updating mechanism that allows memories to naturally decay over time unless reinforced through repeated recall, mimicking human memory patterns. 
The memory strength is modeled as a discrete value that increases when information is recalled, with memories becoming more resistant to forgetting through repeated access. This comprehensive approach enables LLMs to maintain context over extended periods, understand user personalities, and provide more personalized interactions while simulating natural memory retention and decay patterns." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "73955533-3bdd-4b6f-91db-0af790967beb", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/agentic-memory/docker-compose.yml b/agentic-memory/docker-compose.yml new file mode 100644 index 0000000..ad4fbdd --- /dev/null +++ b/agentic-memory/docker-compose.yml @@ -0,0 +1,29 @@ +--- +services: + weaviate: + command: + - --host + - 0.0.0.0 + - --port + - '8080' + - --scheme + - http + image: cr.weaviate.io/semitechnologies/weaviate:1.27.0 + ports: + - 8080:8080 + - 50051:50051 + volumes: + - weaviate_data:/var/lib/weaviate + restart: on-failure:0 + environment: + QUERY_DEFAULTS_LIMIT: 25 + AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'true' + PERSISTENCE_DATA_PATH: '/var/lib/weaviate' + DEFAULT_VECTORIZER_MODULE: 'text2vec-ollama' + ENABLE_MODULES: 'text2vec-ollama' + CLUSTER_HOSTNAME: 'node1' + ollama: + image: ollama/ollama +volumes: + weaviate_data: +... 
\ No newline at end of file diff --git a/agentic-memory/langgraph/agentic_memory_langgraph.txt b/agentic-memory/langgraph/agentic_memory_langgraph.txt new file mode 100644 index 0000000..03889f9 --- /dev/null +++ b/agentic-memory/langgraph/agentic_memory_langgraph.txt @@ -0,0 +1,1018 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "1149ac04-be44-4abb-8214-973a06cdde7c", + "metadata": {}, + "source": [ + "# Agentic Memory - LangGraph Setup\n", + "\n", + "Porting the Agentic Memory notebook over from hypothetical to graph-based LLM agent via LangGraph" + ] + }, + { + "cell_type": "markdown", + "id": "4e8b4770-fe9c-4d3e-95d2-a61628ecdc7b", + "metadata": {}, + "source": [ + "**Dependencies**" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "2e540bb9-8515-4fa9-8142-e14398752cc1", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_core.messages import HumanMessage, SystemMessage, AIMessage\n", + "from langchain_openai import ChatOpenAI\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_core.output_parsers import JsonOutputParser\n", + "from typing_extensions import TypedDict\n", + "from langgraph.graph import StateGraph, START, END\n", + "import weaviate" + ] + }, + { + "cell_type": "markdown", + "id": "73a07b9f-6a99-40cd-9a6d-ef2738def35a", + "metadata": {}, + "source": [ + "**Connecting to Weviate - Vector Database Instance**" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "ea1c03f5-b1b5-4bb9-a635-cb9bf2201cf3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Connected to Weviate: True\n" + ] + } + ], + "source": [ + "vdb_client = weaviate.connect_to_local()\n", + "print(\"Connected to Weviate: \", vdb_client.is_ready())" + ] + }, + { + "cell_type": "markdown", + "id": "8d23faa0-2334-4217-bf94-ebb7790e025b", + "metadata": {}, + "source": [ + "**Instantiating the LLM**" + ] + }, + { + "cell_type": "code", + 
"execution_count": 3, + "id": "46b4c668-241f-47a0-bf6d-fe2c337c00bc", + "metadata": {}, + "outputs": [], + "source": [ + "llm = ChatOpenAI(temperature=0.7, model=\"gpt-4o\")" + ] + }, + { + "cell_type": "markdown", + "id": "0c745401-da1a-4eec-b271-5ba8f1183a18", + "metadata": {}, + "source": [ + "**Helper Functions from Before**" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a2d02904-4b92-4296-b559-6ac3aba90301", + "metadata": {}, + "outputs": [], + "source": [ + "# ========= Reflection Prompt Chain =========\n", + "\n", + "reflection_prompt_template = \"\"\"\n", + "You are analyzing conversations about research papers to create memories that will help guide future interactions. Your task is to extract key elements that would be most helpful when encountering similar academic discussions in the future.\n", + "\n", + "Review the conversation and create a memory reflection following these rules:\n", + "\n", + "1. For any field where you don't have enough information or the field isn't relevant, use \"N/A\"\n", + "2. Be extremely concise - each string should be one clear, actionable sentence\n", + "3. Focus only on information that would be useful for handling similar future conversations\n", + "4. 
Context_tags should be specific enough to match similar situations but general enough to be reusable\n", + "\n", + "Output valid JSON in exactly this format:\n", + "{{\n", + " \"context_tags\": [ // 2-4 keywords that would help identify similar future conversations\n", + " string, // Use field-specific terms like \"deep_learning\", \"methodology_question\", \"results_interpretation\"\n", + " ...\n", + " ],\n", + " \"conversation_summary\": string, // One sentence describing what the conversation accomplished\n", + " \"what_worked\": string, // Most effective approach or strategy used in this conversation\n", + " \"what_to_avoid\": string // Most important pitfall or ineffective approach to avoid\n", + "}}\n", + "\n", + "Examples:\n", + "- Good context_tags: [\"transformer_architecture\", \"attention_mechanism\", \"methodology_comparison\"]\n", + "- Bad context_tags: [\"machine_learning\", \"paper_discussion\", \"questions\"]\n", + "\n", + "- Good conversation_summary: \"Explained how the attention mechanism in the BERT paper differs from traditional transformer architectures\"\n", + "- Bad conversation_summary: \"Discussed a machine learning paper\"\n", + "\n", + "- Good what_worked: \"Using analogies from matrix multiplication to explain attention score calculations\"\n", + "- Bad what_worked: \"Explained the technical concepts well\"\n", + "\n", + "- Good what_to_avoid: \"Diving into mathematical formulas before establishing user's familiarity with linear algebra fundamentals\"\n", + "- Bad what_to_avoid: \"Used complicated language\"\n", + "\n", + "Additional examples for different research scenarios:\n", + "\n", + "Context tags examples:\n", + "- [\"experimental_design\", \"control_groups\", \"methodology_critique\"]\n", + "- [\"statistical_significance\", \"p_value_interpretation\", \"sample_size\"]\n", + "- [\"research_limitations\", \"future_work\", \"methodology_gaps\"]\n", + "\n", + "Conversation summary examples:\n", + "- \"Clarified why the paper's 
cross-validation approach was more robust than traditional hold-out methods\"\n", + "- \"Helped identify potential confounding variables in the study's experimental design\"\n", + "\n", + "What worked examples:\n", + "- \"Breaking down complex statistical concepts using visual analogies and real-world examples\"\n", + "- \"Connecting the paper's methodology to similar approaches in related seminal papers\"\n", + "\n", + "What to avoid examples:\n", + "- \"Assuming familiarity with domain-specific jargon without first checking understanding\"\n", + "- \"Over-focusing on mathematical proofs when the user needed intuitive understanding\"\n", + "\n", + "Do not include any text outside the JSON object in your response.\n", + "\n", + "Here is the prior conversation:\n", + "\n", + "{conversation}\n", + "\"\"\"\n", + "\n", + "reflection_prompt = ChatPromptTemplate.from_template(reflection_prompt_template)\n", + "\n", + "reflect = reflection_prompt | llm | JsonOutputParser()\n", + "\n", + "# ========= Format Conversation Helper ========= \n", + "\n", + "def format_conversation(messages):\n", + " \n", + " # Create an empty list placeholder\n", + " conversation = []\n", + " \n", + " # Start from index 1 to skip the first system message\n", + " for message in messages:\n", + " conversation.append(f\"{message.type.upper()}: {message.content}\")\n", + " \n", + " # Join with newlines\n", + " return \"\\n\".join(conversation)\n", + "\n", + "# ========= Retrieval Functions =========\n", + "\n", + "# Episodic Collection Retrieval\n", + "def episodic_recall(query, vdb_client):\n", + " \n", + " # Load Database Collection\n", + " episodic_memory = vdb_client.collections.get(\"episodic_memory\")\n", + "\n", + " # Hybrid Semantic/BM25 Retrieval\n", + " memory = episodic_memory.query.hybrid(\n", + " query=query,\n", + " alpha=0.5,\n", + " limit=1,\n", + " )\n", + " \n", + " return memory\n", + "\n", + "# Semantic Collection Retrieval\n", + "def semantic_recall(query, vdb_client):\n", + " 
\n", + " # Load Database Collection\n", + " coala_collection = vdb_client.collections.get(\"CoALA_Paper\")\n", + "\n", + " # Hybrid Semantic/BM25 Retrieval\n", + " memories = coala_collection.query.hybrid(\n", + " query=query,\n", + " alpha=0.5,\n", + " limit=15,\n", + " )\n", + "\n", + " combined_text = \"\"\n", + " \n", + " for i, memory in enumerate(memories.objects):\n", + " # Add chunk separator except for first chunk if i > 0:\n", + "\n", + " \n", + " # Add chunk number and content\n", + " combined_text += f\"\\nCHUNK {i+1}:\\n\"\n", + " combined_text += memory.properties['chunk'].strip()\n", + " \n", + " return combined_text" + ] + }, + { + "cell_type": "markdown", + "id": "32c2c8a8-865a-41e4-b775-dbb8e58863fd", + "metadata": {}, + "source": [ + "---\n", + "## LangGraph Implementation" + ] + }, + { + "cell_type": "markdown", + "id": "ce2efab9-339d-48a2-a584-a33926138b44", + "metadata": {}, + "source": [ + "**Main State**" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "312f304f-d0e6-48ed-aaee-b677ee92466c", + "metadata": {}, + "outputs": [], + "source": [ + "class State(TypedDict):\n", + " messages: list\n", + " semantic_memory: str\n", + " procedural_memory: str\n", + " prior_conversations: list\n", + " what_worked: list\n", + " what_to_avoid: list\n", + " end: bool" + ] + }, + { + "cell_type": "markdown", + "id": "2e550adf-9da4-4fff-8091-0361f67e6be5", + "metadata": {}, + "source": [ + "**First Node - Populate State**\n", + "\n", + "Kicks off the system by populating the state with the initial starting values based on the first message. The back and forth chatting loop relies on having already populated values in the state, so this initial node helps get us there." 
def populate_state(state: State):
    """Entry node: seed the graph state from the user's first query.

    Reads the first query from stdin, loads procedural memory from disk,
    pulls the best episodic match and semantic (RAG) context from Weaviate,
    and assembles the initial working memory as
    [system prompt, semantic context message, first user message].
    """
    # Initial Working Memory
    initial_messages = []

    # Record down Initial User Query to Start System
    first_query = input("User: ")
    first_message = HumanMessage(first_query)

    # Procedural Memory Handling
    # Load Persistent Procedural Memory (path relative to the notebook cwd)
    with open("./langgraph/procedural_memory_lg.txt", "r") as content:
        procedural_memory = content.read()

    # Episodic Memory Handling
    # Query Episodic Memory Database (limit=1, so objects[0] is the best match)
    episodic_memory_retrieval = episodic_recall(first_query, vdb_client)
    episodic_memory = episodic_memory_retrieval.objects[0].properties

    # Unpack the matched episode's fields for the prompt below
    prior_conversations = episodic_memory['conversation']
    what_worked = episodic_memory['what_worked']
    what_to_avoid = episodic_memory['what_to_avoid']

    # Create Initial System Prompt with First Episodic Recall and Procedural Memory
    # (no previous conversations exist yet, hence the literal "N/A")
    episodic_prompt = f"""You are a helpful AI Assistant. Answer the user's questions to the best of your ability.
    You recall similar conversations with the user, here are the details:
    
    Current Conversation Match: {prior_conversations}
    Previous Conversations: {"N/A"}
    What has worked well: {what_worked}
    What to avoid: {what_to_avoid}
    
    Use these memories as context for your response to the user.
    
    Additionally, here are 10 guidelines for interactions with the current user: {procedural_memory}"""

    system_prompt = SystemMessage(episodic_prompt)

    # Semantic Memory Handling
    # Query Semantic Memory Database
    semantic_memory_retrieval = semantic_recall(first_query, vdb_client)

    # Format into Message (semantic context travels as a HumanMessage)
    semantic_prompt = f""" If needed, Use this grounded context to factually answer the next question.
    Let me know if you do not have enough information or context to answer a question.
    
    {semantic_memory_retrieval}
    """

    semantic_message = HumanMessage(semantic_prompt)

    # Append to initial working memory: system prompt, then context, then query
    initial_messages.append(system_prompt)
    initial_messages.append(semantic_message)
    initial_messages.append(first_message)

    # Episodic fields are wrapped in single-element lists; later nodes extend them.
    return {"messages": initial_messages, 
            "semantic_memory": semantic_memory_retrieval,
            "prior_conversations": [episodic_memory['conversation']], 
            "what_worked": [episodic_memory['what_worked']], 
            "what_to_avoid": [episodic_memory['what_to_avoid']], 
            "procedural_memory": procedural_memory,
            "end": False}
"execution_count": 7, + "id": "0ccd548b-aa0f-4d15-9d83-659574be3d24", + "metadata": {}, + "outputs": [], + "source": [ + "def memory_agent(state: State):\n", + " \n", + " messages = state['messages']\n", + "\n", + " response = llm.invoke(messages)\n", + "\n", + " print(\"\\nAI: \", response.content)\n", + "\n", + " messages.append(AIMessage(response.content))\n", + "\n", + " return {\"messages\": messages}" + ] + }, + { + "cell_type": "markdown", + "id": "c423401c-4a8f-4492-82e1-73ee7edf8c6b", + "metadata": {}, + "source": [ + "**User Response Node**\n", + "\n", + "This node handles the ongoing conversation as well as subsequent user response. In summary it will\n", + "1. Load the historical messages, remove the current system prompt and semantic memory recall message\n", + "2. Take the user's next message\n", + "3. Create the new System Prompt using the retrieved episodic memory data, along with pre-populated procedural memory data\n", + "4. Retrieves new context from the Semantic Memory database using the new user message\n", + "5. Formats the semantic memory context into a user message itself\n", + "6. Attaches the system prompt, historical messages/working memory, and semantic memory + new user message together\n", + "7. 
def user_response(state: State):
    """Collect the next user turn and rebuild the prompt scaffolding.

    Strips the previous system prompt and semantic-context message from
    working memory, reads the next query from stdin, refreshes episodic and
    semantic recalls for it, and reassembles the message list in the order:
    [new system prompt, prior turns, semantic context, new user message].

    Returns {"end": True} (and nothing else) when the user types "exit",
    which routes the graph to the memory-update node.
    """
    # Drop the stale system prompt (always index 0) ...
    messages = state['messages']
    messages = messages[1:]
    # ... and the stale semantic-context message, which sits 3rd from the
    # end: [..., semantic_context, user_msg, ai_msg].
    messages = messages[:-3] + messages[-2:]

    query = input("\nUser: ")

    # Guard clause: "exit" ends the chat loop; remaining state keys are untouched.
    if query == "exit": 
        return {"end": True}

    # --- Episodic memory: best prior-conversation match for this query ---
    episodic_memory_retrieval = episodic_recall(query, vdb_client)
    episodic_memory = episodic_memory_retrieval.objects[0].properties

    current_conversation = episodic_memory['conversation']

    # Track the matched conversation (deduplicated, insertion order kept).
    prior_conversations = state['prior_conversations']
    if current_conversation not in prior_conversations:
        prior_conversations.append(current_conversation)

    # Up to 3 recent prior conversations, excluding the current match.
    previous_convos = [conv for conv in prior_conversations[-4:] if conv != current_conversation][-3:]

    # Merge takeaway lists. dict.fromkeys dedups while preserving order;
    # the original list(set(...)) produced a nondeterministic ordering,
    # which made the system prompt unstable between runs.
    state_what_worked = list(dict.fromkeys(state['what_worked'] + episodic_memory['what_worked'].split('. ')))
    state_what_to_avoid = list(dict.fromkeys(state['what_to_avoid'] + episodic_memory['what_to_avoid'].split('. ')))
    state_procedural_memory = state['procedural_memory']

    # --- New system prompt from episodic + procedural memory ---
    episodic_prompt = f"""You are a helpful AI Assistant. Answer the user's questions to the best of your ability.
    You recall similar conversations with the user, here are the details:
    
    Current Conversation Match: {current_conversation}
    Previous Conversations: {' | '.join(previous_convos)}
    What has worked well: {state_what_worked}
    What to avoid: {state_what_to_avoid}
    
    Use these memories as context for your response to the user.
    
    Additionally, here are 10 guidelines for interactions with the current user: {state_procedural_memory}"""

    # --- Semantic memory: grounded context for the new query ---
    semantic_memory_retrieval = semantic_recall(query, vdb_client)

    semantic_prompt = f""" If needed, Use this grounded context to factually answer the next question.
    Let me know if you do not have enough information or context to answer a question.
    
    {semantic_memory_retrieval}
    """

    # Build each message object exactly once (the original constructed
    # semantic_message twice from the same prompt).
    system_message = SystemMessage(episodic_prompt)
    semantic_message = HumanMessage(semantic_prompt)
    user_message = HumanMessage(query)

    # Final order: system prompt, cleaned history, semantic context, user turn.
    final_messages = [system_message]
    final_messages.extend(messages)
    final_messages.append(semantic_message)
    final_messages.append(user_message)

    return {"messages": final_messages, 
            "semantic_memory": semantic_memory_retrieval,
            "prior_conversations": prior_conversations,  # full list incl. current
            "what_worked": state_what_worked, 
            "what_to_avoid": state_what_to_avoid, 
            "procedural_memory": state_procedural_memory,
            "end": False}
"markdown", + "id": "47cd5c07-260e-44e7-90c8-a74455ca0e8b", + "metadata": {}, + "source": [ + "**Update Memory Node**\n", + "\n", + "If the conversation closes, we undergo a memory update step. In this we remove the system prompt and semantic memory context as before and format the conversation into a string.\n", + "\n", + "This is then processed through our existing episodic memory reflection chain to update the episodic database collection in weviate. And then processed through the procedural memory reflection prompt to update the procedural memory file." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "030edd09-04cf-451f-bf94-16a7fe71fd51", + "metadata": {}, + "outputs": [], + "source": [ + "def update_memory(state: State):\n", + "\n", + " # Clean System Prompt & Semantic (RAG) Memory\n", + " messages = state['messages']\n", + " # Remove System Message\n", + " messages = messages[1:]\n", + " # Remove 3rd to Last Element (semantic context)\n", + " messages = messages[:-3] + messages[-2:]\n", + " \n", + " # Update Episodic Memory\n", + " conversation = format_conversation(messages)\n", + " \n", + " # Create Reflection\n", + " reflection = reflect.invoke({\"conversation\": conversation})\n", + "\n", + " # Load Database Collection\n", + " episodic_memory = vdb_client.collections.get(\"episodic_memory\")\n", + "\n", + " # Insert Entry Into Collection\n", + " episodic_memory.data.insert({\n", + " \"conversation\": conversation,\n", + " \"context_tags\": reflection['context_tags'],\n", + " \"conversation_summary\": reflection['conversation_summary'],\n", + " \"what_worked\": reflection['what_worked'],\n", + " \"what_to_avoid\": reflection['what_to_avoid'],\n", + " })\n", + " print(\"\\n=== Updated Episodic Memory ===\")\n", + "\n", + " #Updating Procedural Memory\n", + " with open(\"./langgraph/procedural_memory_lg.txt\", \"r\") as content:\n", + " current_takeaways = content.read()\n", + "\n", + " what_worked = state['what_worked']\n", + " 
what_to_avoid = state['what_to_avoid']\n", + " \n", + " # Load Existing and Gathered Feedback into Prompt\n", + " procedural_prompt = f\"\"\"You are maintaining a continuously updated list of the most important procedural behavior instructions for an AI assistant. Your task is to refine and improve a list of key takeaways based on new conversation feedback while maintaining the most valuable existing insights.\n", + "\n", + " CURRENT TAKEAWAYS:\n", + " {current_takeaways}\n", + "\n", + " NEW FEEDBACK:\n", + " What Worked Well:\n", + " {what_worked}\n", + "\n", + " What To Avoid:\n", + " {what_to_avoid}\n", + "\n", + " Please generate an updated list of up to 10 key takeaways that combines:\n", + " 1. The most valuable insights from the current takeaways\n", + " 2. New learnings from the recent feedback\n", + " 3. Any synthesized insights combining multiple learnings\n", + "\n", + " Requirements for each takeaway:\n", + " - Must be specific and actionable\n", + " - Should address a distinct aspect of behavior\n", + " - Include a clear rationale\n", + " - Written in imperative form (e.g., \"Maintain conversation context by...\")\n", + "\n", + " Format each takeaway as:\n", + " [#]. 
[Instruction] - [Brief rationale]\n", + "\n", + " The final list should:\n", + " - Be ordered by importance/impact\n", + " - Cover a diverse range of interaction aspects\n", + " - Focus on concrete behaviors rather than abstract principles\n", + " - Preserve particularly valuable existing takeaways\n", + " - Incorporate new insights when they provide meaningful improvements\n", + "\n", + " Return up to but no more than 10 takeaways, replacing or combining existing ones as needed to maintain the most effective set of guidelines.\n", + " Return just the list, no preamble or explanation.\n", + " \"\"\"\n", + "\n", + " # Generate New Procedural Memory\n", + " procedural_memory = llm.invoke(procedural_prompt)\n", + "\n", + " # Write to File\n", + " with open(\"./langgraph/procedural_memory_lg.txt\", \"w\") as content:\n", + " content.write(procedural_memory.content)\n", + "\n", + " print(\"\\n=== Updated Procedural Memory ===\")\n", + "\n", + " return" + ] + }, + { + "cell_type": "markdown", + "id": "e91ec3e2-91c6-4fd5-959b-307179ee6a24", + "metadata": {}, + "source": [ + "**Check End Logic Function**\n", + "\n", + "Plugs into conditional edges in our graph to handle when the conversation should stop looping." 
def check_end(state):
    """Routing predicate for the conditional edge: chat again or wind down."""
    return "stop" if state["end"] else "continue"

# Wire the graph: populate -> agent <-> user loop, then a memory-update
# pass once the user exits.
graph_builder = StateGraph(State)

for node_name, node_fn in [
    ("populate_state", populate_state),
    ("memory_agent", memory_agent),
    ("user_response", user_response),
    ("update_memory", update_memory),
]:
    graph_builder.add_node(node_name, node_fn)

graph_builder.add_edge(START, "populate_state")
graph_builder.add_edge("populate_state", "memory_agent")
graph_builder.add_edge("memory_agent", "user_response")
graph_builder.add_conditional_edges(
    "user_response",
    check_end,
    {"continue": "memory_agent", "stop": "update_memory"},
)
graph_builder.add_edge("update_memory", END)

graph = graph_builder.compile()
"iVBORw0KGgoAAAANSUhEUgAAAKIAAAJDCAIAAAAHF1hCAAAAAXNSR0IArs4c6QAAIABJREFUeJztnXdcE8n7xycFSEihJPSAoIAgihTB3sWCiI1DT1HsXc9653meXTnPep5d7F0RFbFgwYZdEbsoiIpACCWdtE3y+2P95fgqQoBkN2H3/eIPspmdeZJPZmZn95nnIWi1WoDT0CGibQAOEuAyYwJcZkyAy4wJcJkxAS4zJiCjbUAVlBUppEK1VAQpZBqlXIO2OXphSSGSyARrBsmaSXLxpKJtzrcQTGfd/OV9Re5z6cdXUmcvilyqpjHJNmwLrXmoDCypRAFPWSFWQyrt57cVngHWjZvT/cMZBCIBbdOAqchckCu7d67MzsnCgWPlGUBj2lugbVF9yXsp/fBS8ulNRVBn25BudmibYwIyXz/B4xcr2/ZjmeBYV3/upJS+eSDqNdLZvak1imagKbOYrzr6d37kGGeOD5pfgbGRSdXXjhS7+VCDu6DWrVGTWS5VH1ub//Ov7lZUEioGIEzGmVIbB4sW7W1QaR0dmfnFypSdhfF/eiLfNIrcTC4hEECngQ7IN43Ouvno35/jFjRCpWkU6TzIQSXXvH4gQr5pFGROO8iNneNOIpnESgNhuv/sVPBexsuXI9wu0jJnPxETAGC7WiHcrukQ0I55+3Qpwo0iLfPdc6Xt+rERbtSkcG1MtaIS815JkWwUUZlfPxC2aGdDtzXFO6xI0r4/K/sxojM0ojJnP5Y4e1GQaUutVmdlZaF1evXYOVqVfFEKSpRGqv97kJNZpdAUf5Yjdidk+fLlq1atQuv0GvFqTst7idy4jZzMH99Im7VmItacQqGo24nwjYQ6n64nTQJpxfnGbaIyyE2T/GKVFdUov6qMjIx///33y5cvrq6uMTExQ4YMWbJkyZUrVwAArVq1AgCkpKS4urpmZWUlJibCQ3FAQMDMmTP9/f0BAFevXp0/f/7atWsPHjz46tWr+Pj44uLi7083rM1MlkVhjsywdVYDcjJLRZCDEdZRFRUVv/32W+PGjRcuXJiTk1NSUgIAGDNmTHFxcUFBwbJlywAAbDYbAFBYWKhQKMaNG0ckEk+ePDljxoxz585RKF+vFVavXj116tTJkyd7eHjI5fLvTzcsNCa5Qqw2eLU/AkGZhZCnP83g1ZaXlysUim7duvXp00d30MPDw9bWtqysLCgoSHewT58+kZGR8P/NmjWbNGlSVlZWmzZt4CNDhgyJiorSFf7+dINjzSBJRRCNiYQEyMlMIhGIRmjNzc0tMDBw9+7dVCp10KBBlpaWPypJIBCuX79+6NChvLw8a2trAEBZWZnu3fDwcMMbVy1UBkkDIfRAAblLMEsqUSow/DBFIBA2bdoUFRW1cePGQYMGZWZm/qhkYmLivHnzmjVrtn79+pkzZwIANJr/fFNg4ZGknKuk2SDUzZCTmcYkS0WQMWqm0+nz588/deoUnU6fPXt2RUUFfLzywzeFQrF3794BAwbMmTMnKCioRYsWNVZr1Gd3MonaikokInVjHzmZbR0tNGqjfHHw4sfNzW3o0KESiaSwsBAAQKVSy8rKdP1VJpMpFAr40hoAIBAIvunN3/DN6QanQgQh6U+C3Nzs7mt9NyW/dR+WYatVqVSDBw+OiIho0qTJyZMn6XQ6h8MBAISEhKSkpKxatSooKIjJZHbq1Mnb2/vYsWMsFksikezcuZNIJObk5Pyo2u9PN6zZuS+ktmzkXN5IS5YsQaYlSyviu6cSRzcrw05IUqn08+fP169fT09Pd3BwWLJkCSyzt7e3UCi8dOlSZmamra1teHh4SEjInTt3Tpw48enTp+nTpzdq1OjUqVPDhw//9OnT1atXY2NjbW1tddV+f7oBbQYAZJwtDe5qi9jcjKj3SNYNAQDaIPRcokyECjF09Uhx9EQ3xFpE9GFRUBfbLbNzAjvZEn/gvfz48eO5c+d+f5zBYIjF4ipP+eWXXwYOHGhoS/8
HiURSeUldmcDAwOfPn39/fMyYMSNHjvxRhffPlzdpSTeojTWAtC/Y0+t8qUjdoX/V95UUCkXltaw+2NjY0GiGv+tSGY1Gw+Vya3UKk8mk06sWUlCiPLezaMQfiPpIoeDyl7KjoGecM4WGCYfO77l9poTjTfVqjmhvRsEXrGus47G1+ci3awo8vlpOJhMR1hgdmRl2Fp1jHE5vKUC+aXR5fV9Y9EHeNsrAS0p9QM0dv+SLPONs2cCpyF1tosure0JevqJrrCMqraO2v9mBQwnpZrt3SZ5EaJQ7oCbFnZTSoo9ytDRGf6ucRACln+Ax7cntotiWlAa4p/7tI9Hdc2UhPeyCOtnqUdxYoL8jEgDwIkN4N7U0pKutS2Nqw9g2JypTfXgpzXkmsWFZtOvHQuahcjWYhMwwL+8K3z+V8D7Lm7e30WoB3YbMsCMTzGRzBolEEPNVUiGklGvy38lUSk3j5rRmbZgsF5PYeGBCMsMoFZr8t1JROSQRQpBSa3BPGqFQWFJS4u3tbdhqGbYWarWGZkOm25KdPKxMRF0dJiezscnIyEhKStq4cSPahiBKA7zqwfkeXGZMgDmZyWSyk5MT2lYgDeZkhiCouLgYbSuQBnMyEwgEnQs+dsCczFqtVi5HOlgA6mBOZiKRyGQit2PPRMCczBqNRiRCIcgLumBOZjKZ7OLigrYVSIM5mSEIKioqQtsKpMGczNgEczITiURje4KaIJiTWaPRSKWIBmsyBTAnM5FI/JEHdQMGczJrNBqJRIK2FUiDOZmxCeZkJpFIxggZY+JgTma1Wl1ainRkVNTBnMzYBHMyk8lkZ2dntK1AGszJDEFQbXexNgAwJzM2wZzM+BMqTIA/ocJpsGBOZtyBFxPgDrw4DRbMyYz7aWMC3E8bE5BIJAcHFJJxogvmZFar1XA+DEyBOZmxCeZkxjfXYAJ8cw0mwJ83YwL8eTMmwB9EYgL8QSQmIBKJlVOXYASshH8bPHiwSqUCAMjlcplMZmdnByengjO7NnhQDhmKGO3btz98+DCB8DUCqEwmAwD4+vqibRdCYGXQHjFihJvb/4Rop1Ao/fv3R88iRMGKzA4ODl26dKk8Q7m5uUVHR6NqFHJgReZvOjSFQhk4cCCVSkXbKITAkMwODg69e/eG/3dxccHOiI0tmQEAMTExHh4eZDI5OjoaO1251lfaakjLL1aK+UglETc8tO5thz1+/Lh1i+gPL801NIWlFZHtalmrfG21WDdn3RS8fSTWqLUsV4qiwvB51XH0xJJCzM+WcnypPeOcSWS9kkToK/Ojy+X8EqhtFGopdnC+oSiv4nFa6eAZblbUmru1XnNz1g0Bn6fCNTYpXLysO8U4n9jwRZ/CNcushrRvH4va9sPcTgXTx4Zt6dmM/uqesMaSNcvML1Zq8InYVLFmkos/K2osVrPMYj7EcsWc/7q5wGRbKmSaGovVLLMWAPy62mTRqoFcWrM62Lo9gllwmTEBLjMmwGXGBLjMmACXGRPgMmMCXGZMgMuMCXCZMQEuMyYwb5n/2bR6UExPfUpKJJJ3798aw4Za1czlFhVxC41hRvWYt8z6M27C0IsXz6Jbc0Hhl2Fx0dnZr41hRvVgRWalUol6zWoIQmvHmlH2UC1cNOdjXq6Pj9/jJ/cJBGLr1u2nTJplZ2cP7zvdu2972uVUoVDQqJHXqPiJHdp3AQC8z8meMHF4z559X79+UVxcxOF4DPt5dI/uvQEAu/dsPX7i4OVL9+DK32a/njxl5F8Jm1qHt/um3YuXUs6cOfEhL4dKtQ4Paztt6lxbWzsAwNBhUXx++ZmzJ8+cPenk5HzsSCq8Zy5x95Zr6ZeUSoU7p1Fs7IhuXWsY/48c3Xfm7AmxWOTt3XRU/MTQkPDva1YqlQcO7kpPT+OVFLNY7J4RfUfFTySRSEXcwvjRMQCApcvmLwWgV6+o+b8uAQAUcQu3bl3/JPOBpaWVr4/fmDFT/Jo2M7gixtoqV1LKi46OiY0
d8e7dm917tn7My9229QCZTF67bsXVaxfjho/x9Gxy9drFPxfN/WfDrsDAYPgsLrdw9qwFEASlpCStXLWQTCZ36dxD/0Zfv37h4eEZERHJ55cnnz4mrZAmrNwIAFiy+O9ff5sW1DL0p5jhFpaWcASSPxbO4nILhw8bbWtrn5X1ePmKBXK5LLLPD330n2Q+3JW4uXv33q3D2j18dFdWUVFlzSQS6cmTB23bdXJ14eTkZB86vIfBYMb+FMeyZ/+xYMXKVQtHj5oUHNQK/tGXlZVOnzHGzc192tS5BALh8uXzv8wct33rQS+vJvWXoDLGktmzUePYn+IAAP5+ATQafeWqhQ8f3uVwPNIup44cMW5U/EQAQOdO3eNGDty3f8f6ddvhs4bGjgwOagUACA0JHz029ujRfbWSefasBbo9j2Qy+dDhPQqFwsrKyq9pMzKZzGKxW7QIgt+9dTv9+YunRw+fY7MdAAA9uveWySpOJR+tRmYutxAAMLB/bEBAYEREJHzw+5pJJNLWLft1ZhQWfbl1Oz32pzhLS0tfHz8AgIeHp67wwUOJdrb269ZsI5PJAICIHpFxIwekXjg9ferc2n/l1YHExtfw8HYAgDdvX5aVlwIAOnToCh8nEAhhrdpcuXrh+1OIRGKrVm1Onz4Ob0rWE5VKlXz62JWrF3g8rpUVRaPRCAR8J6cqAsrcv58BQdCwuP+2yqnVahqtuqSCbVp3YDCYqxL+nD5tXps2HaopyeeXHzi469Hj+2KxCADAoDN+VPLBgzu8kuLIqI6VP0IJz/ABgpGQmU6jEwiEClmFVCoBANjZ2uveYjJtKioqqszNyaAztFqtTC7TsxWtVrvgj5nZ717Hj5zQrFng7dvpx44f0Gir9pPi88tYLPb6tdsrHySRq/s2WCz25k17tmxb//sfM5s3b7loYYKDQxUezeXlZRMmDadSrceMnuzqytmzZ2v+l08/qrOcX9a2bccJ46ZXPlj9r61uICFzaWmJVqt1dHBisRwAACKREB4q4S+FTCZXGRK3pIRHoVCYDKZuAKyeZ88yn2Q+/GPBCvjCreDL528KVL7KZTCYAgHfycnFyspK/w/i4eG5OmFT5tNHixbPXf33krVrtn5fc8q5U3x++ZZ/98GjiKOjczUyMxhMoVDg4eGpvw11A4kF1YWLZwEAAc0C/f2bEwiE+w8y4ONKpfL+g4yAgEAS6dt9A2KJ+Pbt9OYBLQEANjZ2KpVKKPrqjcytdHvBwsJSJquAIAgAIBQJAADw/Kd7qdF87c1UCrWs7L9kciEh4Wq1OuVcku4IHL+geuC1U0hwWJs2HXW3RL6pWSQS2Nra6WYKoUig+xFYWVEAAGWl/0UMDQkJf/nyWfa7N7Uyow6QlixZUn0JPk/Fy1d4Nf/hBPM96dcvv3r1XC6X83jcM2dOJJ060rp1+2E/j2IymFxu0ekzxwEglJaWbNu2Ie9j7ry5i1xc3MrLy86lJhdxCzUazbNnT9avX1nOL1/w+3IHByeaNe1sSlJpKc/JyeXJ4wdbt62Xy2U9evThuLkLBPzrN658yHvftGmAs5Pr2ZSTxcVF1ta0W7fTDx5KVKlUwUGt4L7y/n327Yx0Mpn88dMHC7JFcHDYo8f30y6nCkUCPr/8Ulrqv5v/juo7iPzjcfvN21czZ42HICj3w/vU1GS/ps3gC7Fvaram0S5eTNFo1EqV6tix/TdvXZNKpQP6/0ShUGg02pUrF168yrK2pj158sDXx9/X1//K1QtXrlxQq9X5Xz4dPrzn5u1r3br20v+rlvChknyZf3gN4SmNJXNFhVShUFy4eKaoqKBnRN9Zv/xuaWkJAAhr1VYqlVy8dDY9PY1mTZs7Z2FYWFt49D6Xmuzl5Z2Rcf3O3ZtOTi5zZi8MDm4FALC1tXNxdrt27WLy6WMVFdKfYoZn3LkBy+zl1UQulz16dM+/aYCfX4CnZ+NLaecupZ2DIOiPBStKS3kvX2b16hUFAAgICMzJyb5
y9cL792/9/AK8vJp06RwhkYhu3Lhy63a6tELSp3f/Fi2CiMQfDm8ioTA3993165czMx+2bBkya+YCeBL9puZOHbtptZozZ0/evnXN1c197pw/X7x4KpNVBAW1IhAIzZoFPnx0N/16WhG3sEP7rq4ubu3bdf70Oe/KlfOPHt+j0eh9Iwd4ejbW/6vWU+aat8p9eCl9eUfUdWgtQqYtXDSnhFe8Y/sh/U+Bb4+sWrGhbduOehTH+UrRB9mru+UDp7pVXwwrkYT0ZFfi5soTtg4mw+bwIaPcEkcGXOb/ITZ2RFTUoO+PEwnmffPfKDKvWLautqf4eDe9fu2xMYypFTZMGxumDdpWGB7z/pHi6AkuMybAZcYEuMyYAJcZE+AyYwJcZkyAy4wJcJkxAS4zJqhZZgsLYM3Eb32bKFoAbNgWNRarWWZ7F6tPryUGsgrHwJR8kVHphojZSWOSnTwowhJj7VrAqQ/CEqVnM+sai+k1N3eOYV8/UaTRmG0U7QbK3XM8tquli1fN8d/1DbQsEUD7l31sE+XAsLdgsiwBrjh6QCpNyRd5wXupa2NKSDc7fU6pXbqxBxfLCnLlGrVWIoDqYSeaaDQaNQTBG2HMFHtnKyqd6NeK4d605uEaBitZ5XRkZGQkJSVt3LgRbUMQBV83YwJcZkyAOZnx/M2YAM/fjAlIJBKbzUbbCqTBnMxqtbq0tFSPgg0KzMlMJpOdnDCXhQdzMkMQVFxs+HAAJg7mZMbnZkyAz804DRbMyUwikRwdMZfsEnMyq9VqHo+HthVIgzmZsQnmZCYQCBYWNfvINTAwJ7NWq61V5MCGAeZkJhAIVUaba9hgTmatViuXy9G2AmkwJzM2wZzMRCLR1tYWbSuQBnMyazQagUCAthVIgzmZsQnmZCaRSA4ODmhbgTSYk1mtVpeUlOhRsEGBOZmxCeZkxh14MQHuwIvTYMGczLgvGCbAfcEwAYFAoFJr3t7fwMCczFqt1khJgEwZzMmMTTAnM765BhPgm2swAZlMdnauIgdswwZzMkMQxOVy0bYCaTAnMz43YwJ8bsYE2JybsRL+bdSoUVqtFnYEk0gkHh4eGo1GKpUmJyejbRoSYCVQNofDuXjxoi6x+6tXrwAA7u7uaNuFEFgZtOPj47+58iISiREREehZhChYkdnHxyc8PLzyDMXhcGJiYlA1CjmwIjMAIC4uTtehCQRC165dsbOfHUMye3t7h4WFwf97eHjExsaibRFyYEhmAMCIESPgDt25c2dM3SSpy5W2UqZRyDVGMMboOLE8w0M6v3jxIjpyiJhvlpHfCQRAt621arVbNz+9wX9+W0ggEDRqTKy2TRC2m1Vhrsy7Jb3jQLaFlb6DcS1kvpFUotECvzBbhh3mgjqYFEq5upyrvHqoYPQSLwqt5uxEtZD52jGelTWpZWdWvY3EMRj7l+RMXd9Ed8+nGvTq9V/eV2g0ANfY1Og+zOX2Gb28VPWSuaRAQSJj65rcLGCyLD69qtCnpF7iySRqtotVva3CMTBMlqU1k6zW43JYL5nlUo0Kwi+tTRHuJ5nB5mYccweXGRPgMmMCXGZMgMuMCXCZMQEuMybAZcYEuMyYAJcZE+AyYwJcZpOAyy0q4hYar35cZvQpKPwyLC46O/u18ZrAisxG3Sqm1WoLCr/U+XQ1BBl7J5ux9lD1699l+tR5166nPX36iE5n9OjeJzAweO++7V++fPbybDJr1oKmvv5wyadZj3clbs7NfWdnZx8cFDZu7FQWi12rGi5fPn/46N7Cwi8sFrtv5MDhw0YTiUShUDBgUI9JE395n5N9584NHx8/ihVFJBJu33ZQZ+TQYVHBQWG//br4R5/ixYusg4cSX7zMAgD4NQ2YNGmmrtHXb15u2bruw4f3LHu2p1eTnJzsA/uSLS0t5XJ54u4t19IvKZUKd06j2NgR3br2BAAknTqSfv3yTzHDd+/eUlZe6uPjN3f2Qg8PzyJuYfzoGADA0mXzlwLQq1f
U/F+XGFwOI/bmdRtWtmvb6Z+NiYEtgk8mHd74z1/jxkz9K2GTTC5buvQ3CIIAAE8yH/762zTPRo3nzvkzNibu+fPM2XMn6VKO6FNDWlpqwurFPj5+fy5c1aVzxJ692w4f2auz4dCh3c5OLuvWbp86ZU6fPv2z3735+PED/NabNy+Li7ndu/eu5iNwuYUKpWJE3Lj4kRO43ML5v8+AbSsu5s6dN5lMJv/x+4rg4LA7d25G94uxtLTUaDR/LJx1796t4cNGz5q5wNu76fIVCy5cPKtr8cSJg3PmLFy2dG0Jrzhh9WIAAMue/ceCFQCA0aMmbdqYGDdsjFHE0OrBtWPFj9OFIoFW/7/OnTsvW5oA/5/9tiA0NHT/vuPwy6QTqaGhoS+f54kE2kGDYlYsX6076+XzvNDQ0POp6XrWIORrevXqPWrUWF0Nfy5c1rFjR26hNP8TPzQ0dPKkabq3SnmKLp27rPn7H/jl6r82RET05JdB1XwKIV+j+//mjUehoaHXrt4TCbRbt+wODQ39+KEULjNgwCDYhpSzV1q3bv0hh6c7a97c33/6aYhIoN2z+7DuFJFAuzvxUGhoaP5nge5Tp5y9UqtvGP7bPPu9Wl2zgkbc+Gpl9TXdk6WFJQDA0tISfung6AQAEAoFXG7Rp095BQX5qedPVz6RxyvWswYCgVBaWjIkdoTu3LCwthcunv1S8NnJ0RkAEBISrnvL0tKye/feV65eGDd2KolEunnrapcuESRSdQ6wBALhdsb1EycPffqUZ21tDQDgl5cBAEpKimk0mr09Cy7j6sopLi4CANy/nwFB0LC4aF0NarWaRqPrXlIoXwMMOjm5AADKSktsmDZ1/YJrAZr7m/n8MgBA/MgJnTp2q3zc3l7f0KkSqQQAYGtrrzvCYDABAKUlPFhm3dcK07t39JmzJ59kPqTTGcXF3O7dqhuxAQAHDibu3bd98KCfJ4ybXlZeunTZfI1WAwBwc3OXSqUfPuQ0buytUqlycrKDglrBn4jFYq9fu71yJSRyFV+yBdkCAKDWqPX8pPUETZnpdAYAQKGQe3h41q0GR4ev3Vp3hM8v14n9PU19/Rs39k5LO8dmO7q6cpr5N6+mcoVCceTo3r6RA6ZNnVN5jAEA9OoZdTLp8IKFM3tG9M169gSCoFEjJ8DtCgR8JycXKyvT8pBEc0HF4Xg4OTlfvJSiC6IJQVCtEjiyWGxnJ5eHD+/ojty8eZVCoXh7N/3RKX16R2fcuXH9xuUe1V58AQDkcplCofD9/0troUgA5zcCANjY2E6bOtfKipKXl9sqtM2uHUc4HA94jlCr1SnnknSV6BMfFJ6bykqNmIEDzd5MIBCmTpmzaPG8qdNHRfeL0ajVaZdTIyIiYwYP07+SUfET//p7yZq1y8PC2mZmPsy4cyN+5AQqlapUKqos361rry1b15eU8GocsW1sbBs39k4+fczeniWVSPYf2EkkEj98yAEAvHn76u81S2dM+5VsYUEkEouKCuztWSQSKaJH5LnU5O07/iniFvr6+OXkvMu4c33fnqTqs1I6Ojq5uridSDpEoVJFIuHgQT/rrkIMBcqxRzp26JqwcuPefdu3bF1Ho9EDWwQHBobUqoZevaLkCvnJpMOXr5xnsxwmjJ8+dMjIasrb27NcnF3pdIY+M8Wff6xa/feSZct/53A8Jk+elZv77tSpoxMnzHB2cnFxcVu9ZqnutoaPd9NN/+ymUChrVm/Zlfhvenpaamoyh+MR3S+GXNXcXBkCgbBw4aq/1yzdvGWto6Nz9269HR0NvClXrz1U6cd5No4U35CqJzzzQi6Xj4gfGDN4WOXr8zqgVqvhq3S1Wn074/rSZfPXrd0WEhxmOEv14sCynMlrvIk1zb1YiSQE63H02P7062kqlap3769rHolE8vPwqCrLT5zwS1TfgVW+9fnzx19mjW/bpqN3E1+FUnHr1jUKhcJx8zCm+fUCWzIfP34gODhs2dK1utWqtbX1zh1
HqizPZPxwRUuj0bt3633//u0rVy/Q6YwWzYNmzvzd4COtAcHcoN3A0HPQxsoTKoyDy4wJcJkxAS4zJsBlxgS4zJgAlxkT4DJjAlxmTIDLjAn0kplKI5Etag5Xg4M8Lp5UfW5X6yWzNZNU+kVuCKtwDImgRFkhgUgkAwWMcvKwglRmGVm5YSMoUXoF0PQpqZfMzp5UGoP06JIRnZVwaou8Aso4zW0frZcXbC0CLT9MKy8vVjZtZctytdInshyOkRDzVYJixc2k4nErvSws9eqotQub/vaR6PltoVQIqZQmGttRo9UCoCUS6ruC0AJgmj9kp0ZUQYmiSSC9Q399vdnrmlVOC0w2CcLAgQP37NljZ2dX5xqKiopmzpxJIpEWLVrk5+dnUOsMAAEAS2qtf8R1chIiAKvat4QA58+fbxXe0tm1XnG/1Vo5pJF9KeQuWDhv0qRJ/fr1M5yBqGGKatWZgwcPjhhRL39NeEsAvNeyuLh406ZN69evN5B1aNJwZH7w4IG9vb2Pj08961GpVLoLTD6ff+rUqV9++cUQBqJJw5F5//798fHx9a9HqVTCO2hgFArF3bt3f/755/rXjCINROacnBxra+vWrVvXvyqZTAYP2jBardbS0lKpVNa/ZhRpIH7aBw4c6Nq1q0GqUigUarUa3hVHp9Nv3bplkGrRpSH05vLy8nv37vXt29cgtXXr1k2lUjGZzMzMzOHDh+/cudMg1aJLQ8jGvm/fPhqN9tNPPxm8Zrlc/uuvv27atMngNSNMQ5C5Xbt2169fN7Wd4yaF2Q/ap0+fjoyMNJ7GZWVlGzZsMFLliGH2Mj948GD48OHGq5/FYn38+DEjI8N4TSCAeV9pP3/+vLi42MvLy6itrFy5srRUrxx9Jot5y5ycnDxo0CBjt0Kn0+l0uh4FTRczHrQhCHrw4AEyjxauXLly5EjV26DNAjOWOSUlpUOHDsi01bFjx927dyPTljEwY5nPnz9vqFsiNUJiviKdAAAgAElEQVShUK5du4ZMW8bAXGUuLCzk8XhBQUGItSiTyYqLi/UoaIqYq8zp6ekxMTFItkilUmNjYyUSCZKNGgpzlfnChQtt2rRBuNG4uLgHDx4g3KhBMMsFFZfLFQqFTZv+MGKjkRg/fjzCLRoKs+zNN2/e7Ny5MypNP336tLLTgblgljJnZmZ269ZNj4KG5/Dhwzdv3kSl6fpgljJfu3atVatWqDQ9dOhQkUiEStP1wfzm5szMzODgYH1KQhBk8AE2MDAwMDAQXZ+hOsTnNUuZQ0L0itIrkUiMoYdCoUD32TabzSbWGNfvfzG/QVt/mY2EQqEwOw9A85OZx+O1bNkSRQOqj4FumpjZoM3lcmUyGbpftMFD1yOAmfXm3NzcJk2aoGuDVqtVKKrOsGCymJnMeXl5xvYVqRECgSAWi+FNVlwut/Jb69evN82dOGYms1AoDAgIQNsKYG1tXVhYOGbMmPfv339znEql/vg81DCzufn9+/eBgYFoWwGsra3Lysq+932eNGkSShbVgJnJLBAIbG1t61NDWlpaSkrKly9faDRa69atR44caWdnB0HQoUOHrl69KhKJ3N3d4+Li2rZtCwA4c+bMzZs3Bw4cuH//fj6f36RJkxkzZri7u+fn50+cOBEAkJCQkJCQ0KNHj9mzZ48aNYrH4zVr1mzt2rUAgGXLlnE4HBKJdOnSJQiCwsLCpk6dSqPRIAiKjo4eNWpUbGwsbNKSJUuEQiHsJiyXy/fv33/jxg2lUsnhcAYNGmSQu/dmNmjXU+ZDhw79888/HA5n+vTpgwYN4nK5FhYWAIBNmzadOnWqd+/e8+bNc3JyWr58+cuXL+FTsrOzk5OTZ8yYsXDhwtLSUni7s7W19ezZswEAI0aMWLNmzZAhQwAAM2bM+ObyMDk5ubi4eMmSJRMnTszIyDh27Fj15mk0mqVLlz548GDIkCHTp09v3Ljx6tWr09LS6vx5dZhZb1YqlXWWubS09Pjx492
6dZs7dy58BHZMyM/Pv3r16s8//xwXFwcA6NChw7hx4w4fPpyQkAAXW7x4MRzlIjo6eteuXSKRiE6ne3t7AwA4HI7uWiEkJCQ5OVmXlRgA4ObmNm/ePAKB0LRp0zt37jx58mTs2LHVWHjnzp1Xr17t3buXxWIBALp06SKXy8+ePdurV6+6fWQdZiazWCyuPkdrNTx9+lStVn/vPgZ33Hbt2sEvCQRCSEhIenq6roBume7o6Ajvw/Dy8tLHDCur/0IuOTk5vXnzpvryjx49giBozJj/Ujir1WoaTa/IX9VjZjLDW43hHLu1hc/nwzeEvzkulUoBAJUHCQaDIZPJKioqvikJ54fTaDQqlaq2D0XIZDK8n7Z6C+3t7XWjSOVG64mZyWxhYVGrXKGVgV3q+Xy+g4ND5ePwCCkWi+F/4DJkMrma5xMymaxGzX5ENSHV6HS6UCh0dHQ0+KMRM7sEs7KyqvMdKHglVvmKBo5K4OfnRyAQHj58CB9UKpWPHj3y9/evZli2tLSE18dlZWW1NYNEIjEYjPLycvilVqvl8Xjw/0FBQWq1+sKFC7rC+iSM1Qcz681Nmzat89MhDofTu3fvixcvisXikJAQkUh08eLFhIQEFxeXHj16HD58WKPRODs7p6Wl8fl83WValVAoFGdnZ2dn59OnT1MoFLFYHB0drX8XDAkJuXbtWsuWLe3s7JKTk798+QJfonfr1u3SpUu7d+8uLi5u0qTJhw8f7t27t3379vrfwzczmSEIys/Pb9y4cd1OnzZtmpOT06VLl+7fv89isUJCQuCZb8qUKdbW1ikpKRKJpFGjRosXL67eA1wul1MolN9++23jxo07duxwdHTs1KmTk5O+yQMnTJigVCrXrVtHo9EiIyMVCgXskWJhYbFixYq9e/fevHnz4sWLrq6ukZGRBpmbzWwb+7p161xcXIYN0yvBs0AgMMaDYY1GIxAI7O3tDV6znjR8twI3N7eCggK0rQB1u9RHETOT2cPDA/Xhh0gkmp1ngZnJ7Ovri/qWNaVSWTlwmFlgZjKz2WwymfzNU16EqaioMLtw4mYmMwAgICDg1atXKBpgZWVV5xuuaGFmCyoAQFhY2JcvX/QpaW1tbYxJlMk0v3TlZragAgC8fft2+fLlhw8fRqX1jIwMOp2O5L5qg2B+g7afnx+Px9PdLESYTZs2MRgMVJquD+YnMwCgbdu29+7dQ75duVweHx+PumtpHTBLmbt27ZqTk4N8uxQKBbFoJ4bFLGXu3LnzoUOHkG9306ZN37hymgtmKTORSOzatSvC90kEAsHZs2frn2QBFcxSZgBAr169DOILpz9arRaty/v6Y34LKh0dO3a8du2aOe5oQh5z7c0AgH79+p0+fRqZtrhc7owZM5BpyxiYscwDBgw4c+YMMm0lJSXpGSLBNDG/m506fH193dzc3rx54+/vb+y2xo0bZ9bR9824N8POUwgExq2oqBCJRGb3VKoy5i1zZGTkrVu3jB1gce7cuXl5eUZtwtiYt8xwgMWjR48ar34ej+fo6GiQRGYoYsYLKhiZTBYREWHuuSiMjdn3ZiqVGhcXd/78eSPVb44x/b7H7GUGAERFRcG53wYPHtyuXTt457FBOHDgQFZWlqFqQxEzXlDp4HA45eXlYWFhWq1Wq9Ua0IPHxsZGt9ncrDF7mQcMGFBQUKC7wiAQCAa8/dm/f39DVYUu5j1ojxs3jsvlfnMVaShfrSlTpuTm5hqkKtQxb5kTExOHDBliY2OjO6LVag0Sy+fp06dMJtMcHUWqxOwH7VmzZvn6+m7btk3nvA2HE6knwcHBZn0T+xvMuzfD9O3bd+PGjfC+G4PMzTKZ7MWLFwayziRoCDIDALy9vU+dOtWuXTsSiVT/RH9//fXXp0+fDGSaSVDHu2CPr/I/vZaSLIm8T3I9iiOHCoIs6rchWAuAWq0mm9i+CksK0cKK6OJFaRVhx7Sv9axUa5m1Gu3BVZ+btbW1YVvaO1sCYMb
PbcwIAgFIhSpBmepJWmmfUc5OjWq3m6TWMu9f/rFdP0dnLzPb4NuQuJCY37Yvy8OvFhLUbm5+eKm8RQd7XGN06TWK8+gKv1b9s3Yyf3ghtXfBXexQhkQmqBQa3udaRFSqncwWVgR7ZzP2lWkwcHxo5bxahFWpncxFeXKz9pVpMMgr1Cq50QZtHDMFlxkT4DJjAlxmTIDLjAlwmTEBLjMmwGXGBLjMmACXGRPgMmMCXGZMgMuMCXCZMYFZyqy/44S5b+s1FMZ1x9+9Z+vxEwcvX/oaX/Nt9uvJU0b+lbCpdXi7+/czdib+W1j4xdnZNbpfzKCBQ+CgmIm7t1xLv6RUKtw5jWJjR3Tr2hMAcOPm1aXL5i9fuvb4yYNv3776eWj8mNGTf9To6LGxXp5NPD2bJJ8+plDITx6/RKfTn2Y93pW4OTf3nZ2dfXBQ2LixU1ksNgDgyNF9Z86eEItF3t5NR8VPDA0JTzp1ZMvW9YMGDb1586pEIm7m32LixF+a+n4Nb/L6zcvtOzZmZ7+mUKjt2naaPHkWk8EEAPTr32XmL79nZFy//yCDRqP3ixocP3I8/Ik2bvrr7t1bAIDAwOBpU+Y6O7sAAH5kj5FAZ9dFRUXFkmW/eTZqPGf2wry8nLKyEjgjzB8LZ3G5hcOHjba1tc/Kerx8xQK5XBbZ5+t+tX/+XT1uzNQxoydz3Dyqr//Ro3tyhXzVig0Vsgo6nf4k8+H832dE9IgcOGCIWCQ8lXx09txJO7YdevX6+a7Ezd27924d1u7ho7uyStkCVUrl8qVrS0p5+/bvmD1nYuKuYy7Orh8/fpgzd5KnZ5Nf5y0WCvh7923n8bjr1m6DT/lr9eJR8ROHDo2/cePKvv07mvr6t2nT4cjRvWlpqaNHTWKx2GmXU+GNP1Xas3vXMYPkIqoSdGTmC8oVCkXHjt0ievTRHbx1O/35i6dHD59jsx0AAD2695bJKk4lH9XJPHDAkF69ovSpn0Qm//nHKt1mqn83r+kXNWjG9F/hl61atYkfHfPo8T2RSAgAGNg/NiAgMCIisnINkybOtLa29gegqW+zuJEDTp8+PmXyrEOHdxOJxL9Xb2bQGQAABoO56q9Fz55ltmwZAgCI7NN/+LDRAADvJr7nL5x5+PhemzYdiriFVCp12M+jyGRy38gB1dhTUJDfqJGXIb7dKkBHZlcXt4CAwEOHd1Mo1H5Rg+DtMPfvZ0AQNCwuWldMrVbTaP9toQgJCdezfn//5jqNudyiT5/yCgryU8//T6w4Hq+4S+ceDAZzVcKf06fNa9OmQ5VVOTk5e3h4vnn7EgCQ9exJcHAYrDEAICysLQAg+91rWGYK5WuLJBLJwcGxrLQEANCje59r1y79Nn/61ClzGjf2rsaeigqpnp+uDqAjM4FA+GvVpsTdm7fv2Hgy6dDvvy1r2TKEzy9jsdjr126vXJJUaRyzpurrOEyl/Lcpks8vAwDEj5zQqWO3ymXs7dl0On3zpj1btq3//Y+ZzZu3XLQwwcHB8fvaGAymWCwCAEilElsbu8rHAQClpSXfn0ImkdUaNQCgdXi7hFX/bN+xcez4oX0jB8z8Zf6P7HF2dtXz09UB48pcfXbTmb/Mj40d8eeiOQv/nH382AUGgykQ8J2cXAwbaI1OZwAAFAq5h4fn9+96eHiuTtiU+fTRosVzV/+9ZO2ard+XKS3huXt4AgDYbEd4nIfh88t19VdD6/B2Ya3anEo+unXbBicnly6de1Rjj5Ew7oLKxsZOpVIJ//+r4XILdW/BiVtdXdwGDRwqkUq43MKQkHC1Wp1yLklXxiD5TjkcDycn54uXUnS1QRCkyw4MZxcMCQ5r06bju/dvvz89K+tJQeGXgGaBAICAgMCsZ090+dZv3boGAGjRorq8F3D9RCLxp5jhbLbD+/dvq7fHSBi3N7cKbU0gEDZvWRszeNjHvNwduzbBx1UqVfzowV06R3h5Njl79iSdRnd15bi7NzqXmrx9xz9F3EJ
fH7+cnHcZd67v25NUz+wzBAJh6pQ5ixbPmzp9VHS/GI1anXY5NSIiMmbwsDdvXy1d9tuA/rFUqvXDh3f9mjbTnbVh46rQ0NaFhV9OJR+1t2cNHDAEABA3bEx6etpvv0/vFzWYx+PuP7AzOKhVUMvQalpPPn3szt2bET0iy8pKSktLmjZtVo099fmY1WNcmRs18pr/65IDB3f9cntcYIvgieNn/PX3EgCATC4LDgq7eu2iVCrx8vJetXIjrOWa1Vt2Jf6bnp6WmprM4XhE94sxyBqjY4euCSs37t23fcvWdTQaPbBFcGBgCADA0sKykYfXkSN7tVpty6DQGdN+1Z0CQdD2Hf8olYqWLUMnT5wJZ77ncDz+/mvzzsR//16zlEq1jugROWnizOod111dOSqlctv2DTQafdCgoUNiR1Rjj/Go3Va5zbNy4pd4G9Me9IFvj5w/d8uU830+uFDiyLEM7GijR1lgrkEp7t/PWJmwsMq3Nm/aa7zVp/liljIHBbXauaPqwLsO7CpWRDhmKTOFQnEx2iozZvAwo14NoYJZPqHCqS24zJgAlxkT4DJjAlxmTIDLjAlwmTEBLjMmwGXGBLWQWaPRslzxaFEmgRWVSCLVIqRTLWQmEgkqhUZUXotwVDhGouSLnGFfixvVtRu0PfyoojLj+jng6AOBAOxdaxFusXYyt+nDup1cXHurcAzJ/QslHB8qnVmL3lzrCLyCUtXpzQU9RrjasvHgnUijUmoepZXaskmte7NqdWJdwqbzecr7F8o/v5V6NWeIys1sDIdzVRGJZrbEIFsQhCVKSwoxoC0zsKNtbU+ve45IpVxTWqjUasxsL9qLFy9u3rw5bdo0tA2pNXQ7MsOWTKzNBbaOursVWFKIro3r5XOJCuUyqlsB1c3bAEmMzAizz/iKow9mNkXVH6FQmJOTg7YVSIM5mV+8eLF582a0rUAazMlsY2Pj7d3AXc2/B5+bMQHmenN+fv7ly5fRtgJpMCfzp0+fLly4gLYVSIM5mTkcTo8ePdC2AmnwuRkTYK43CwSCd+/eoW0F0mBO5pcvX27dWkXkiYYN5mR2dnZu37492lYgDT43YwLM9eaioqKMjAy0rUAazMmcm5ublJSkR8EGBeZkZrPZwcHBaFuBNPjcjAkw15vLyspevHiBthVIgzmZ37x5s3v3brStQBrMyYyvm3EaLJjrzQUFBdevX0fbCqQxy7hgOq/6Opz4+fPnK1eudO7cuW7tEgiE6oN0mibmOmgLhUI4VHNtUavVEATVOWQ3m802ux0bZtyb6wyJRCKRSGhbgTTm98OsJxqNBg5ljikwJzMEQbrw9tgBczKTSCQ4UQ6maOAyS6XSb7bSkEikemZVMEcauMxTp079xitbo9EYO3+ICdLAZf7+aguCIIMkxDEvGs66+cSJE6mpqWKxuEmTJnFxcUFBQaNGjeLxePC7jo6O+/btAwDweLzExMSnT5+q1epmzZqNHTvWy8sLAHDmzJmdO3f279//9u3bUqnUz89vzJgxPj4+37Rrputm87O4SrKysvbt29e8efPp06c7OjrC/XXBggUMBqNdu3Zr1qxZsGABnH/1zz//fPHixZgxY6ZNm1ZWVrZgwQKJRKKrR6lULly4cO7cuQKBYP78+VwuF9WPZTAayO0RWI9+/fr5+/t36/Y1KZ+vry+JRLK3tw8ICICPXL9+PT8/f8WKFSEhIQCAgICAMWPGpKSkDBv2NevBuHHj4OSSPj4+48aNO3fu3Pjx49H7WAajgfTm8PBwBoOxZs2ahw8fVlPs+fPnNBpNNxQ7OTm5u7tX6Z3v6OjI4XCys7ONZjKiNBCZ7e3t165dy+FwlixZMnfu3NLS0iqLVVRUMJnMypMrg8EoLy+vsjCDwaiolNHZrGkgMgMA3N3dly1btmrVqo8fP65fv153vPI1JovFkkgkcIo4GD6fX/llZUpLSx0dG0i6o4YjM7x2CgoKCg8Pz83NhQ9SKJTKndXf318
sFr9+/Rp+mZeXV1hYqJu5K/P8+fOioiI/Pz+kzDcuDeQSLDs7OyEhISoqikqlPnnyRDf7Nm/e/MaNGydOnGAwGP7+/l27dj1+/HhCQsLw4cMJBMKxY8dsbGz69u2rq+fff/8NDg4uKio6e/asnZ1ddHT0j9s0JxqIzJaWlu7u7idOnNBqtS1atJg8eTJ8fPTo0eXl5bCc48eP9/T0XLZs2c6dO3ft2qXVagMCAiZMmGBn91/mbbVavWfPHoVCERgYOHbsWFNOE1krGs7tkXoC3x45deoUvKD6EfjtEfNAq9Wq1Wq0rUAazMmsUqmkUinaViAN5gZtCIIUCsWPFlE1YqaDdgO5BNMfMplskAzv5oX5/TDrj0ajQdsEpDHX37WFhUXd/KW5XG5eXl7btm2NYJTpYq4y13lF+/79+8zMzF69ehnaIpPGXC/BcGoF5uZmsVj88eNHtK1AGszJ/OzZs40bN6JtBdJgTmYqlerk5IS2FUiDz82YAHO9GYKgyj5+GAFzMt+/f3/hwoVoW4E0mJOZQqGw2Wy0rUAafG7GBJjrzRUVFQ3GyV5/MCdzZmbmX3/9hbYVSIM5mYlEIgYfROJzMybAXG/G52ZM8OzZs127dqFtBdJgTmY6nV7ZMRsjYGVuHjJkSE5ODoHw9fPCnicajSYzMxNt05AAK715woQJdDq9cjRGrVbbpk0btO1CCKzI3L17d09Pz8pHbGxsxo4di55FiIIVmQEA8fHxOg8yrVbr5+cXGhqKtlEIgSGZu3XrBkeTAQAwmcxRo0ahbRFyYEhmAEBcXBy8Ey4wMDA8PBxtc5ADWzJHRER4e3vb2dnFx8ejbQui1LygyrzG5+UrKiQNZBehWCwWCoUcDgdtQwyDDdvCikp096U28q9uV1h1MpcVKo6uyW/Zxd6GbWFNx9ztfvOAAEoK5BK+ytKS0DnG4YelfiRz8Wf57TOlveIbyK++wfPocomVFbF9NKvKd6uemzUa7fUTJV2HuBjZNhyDEdbTQSqGcp6Jq3y3apkLcmSWVkRLCuaSBZg1rk1o2Y+rdlqtWmZ+scrRs4FEV8EObDeKUl71nt6qL6zkFWqAuT3AZg+ZTCwrrDqCA7bWzZgFlxkT4DJjAlxmTIDLjAlwmTEBLjMmwGXGBLjMmACXGRPgMmMCXGZMYFoy/zSkz/oNq2os9vrNS8OGxm/wmJbM+nAp7dzUaaPkcsyl86wP5idzQ+3HRt3MZhhHPgiCInq1GT9u2rCfv/q4//7HTKFQsHXzvvc52RMmDu/Zs+/r1y+Ki4s4HI9hP4/u0b03XEytVh84uCv1/Gm5XBYU1Eohl8PHebzi3Xu3PnhwRyqVuLs30p1yKe3cxn/+AgAMGNQDAPDbr4t79+oHAHia9XhX4ubc3Hd2dvbBQWHjxk5lsaoLF5R06sit2+k9I/ruP7BTKBQ0aeI7dsyUq1cv3rlzg2xh0TOi74Tx00kkEpw9NHH3lmvpl5RKhTunUWzsiG5de9aqhrKy0m3bNzx4eAeCoBbNgyZNnNm4sTcA4J9Nq2/eujZ39sKt2zcUFORPnzbv381rElZubNOmA2zk+Qtn1q5bcf3a4/oLhJC/JpdbOHvWAgiCUlKSVq5aSCaTu3TuAX/Uc6nJfXpHtwwMefjorljy1ZUJUkNv377qHx1jw7S9lZG+ctVCNzd3f7+A1uHtY3+KO3HyUMLKjTQancPxAAA8yXw4//cZET0iBw4YIhYJTyUfnT130o5th6rPuv7iRRaZRF6yaHUxj7tu/Yp5v07tFzVo7dpt9+9n7Nu/w8PDs2/kAI1G88fCWVxu4fBho21t7bOyHi9fsUAul0X26a9nDXK5fPbcSSKRcML4GRQrytHj+2fPnXTwwGkGnQEAkEolu/dunfnLfLlc1r5d57MpJ9Mup+pkvnXrWvPmLQ3y/SMk89DYkcFBrQAAoSHho8fGHj26r0vnHu/evz2Xmhw3fMzYMVMAAL16RWU9ewKXd3Vx27f
nJLx1sU+f/gMH97hz54a/X4Cdnb2rKwcA4O/f3MbGFi787+Y1/aIGzZj+K/yyVas28aNjHj2+17FD1+qtWvRngq2tXUBA4MNHd+/fz5g183cCgdDU1//y5dTMzId9Iwfcup3+/MXTo4fPsdkOAIAe3XvLZBWnko/CMutTw5WrFz5//rhu7baQ4DAAQIsWwcPiopOTj8WPHA9nwps7e6G/f3O4tj69o/fs3SYSi5gMpkgsynz6aOqUOQb5/pH2viYSia1atTl9+rhKpbp9Ox0AEBMzvPK7uv9zct/t278jO/s1PLaXl5dVWSGXW/TpU15BQX7q+dOVj/N4xTUaY2lp9fUfC8vK4fbZDo5CoQAAcP9+BgRBw+L+yy2nVqtpNLr+NTx79oROo8MaAwCcnV08PDyz333NXkihUHQaAwAiekQm7t5y/frl/tExd+7c0Gq1XbtE1Pgp9AEFJ3sGnaHVamVyWTGPS6fTbZg235fJfProt/nTg4Na/TpvMc2atmjJPI22auc0Pr8MABA/ckKnjt0qH7e3r3soP91udz6/jMVir1+7vfK7JD0CEelqkEglNrb/ExyBybQpKy2B/6dS/8evksVih4W1Tbuc2j865sbNq6GhrXUjVj0xjMy1SjtRUsKjUChMBtPWxk4ikSiVSktLy2/KHDyY6OrKWbVyIxzciUr5NtOb7rqUTmcAABQKuYeHJzA0DAZTIOA7OblYWVnVrQYHtuPr1y8qHykvL3NydP5R+cg+/Rctnvf69YvMzIe/zl1Ut0a/xzALKhKJxGAwS8u+/ki1Wi2PV3W0HrFEfPt2evOAlgAAX19/AMC19EvfFxOKBN5NfGGNlUplhaxCl24Glrz0/zsEh+Ph5OR88VKKTPZ1JQ1BkEqlMsjnCgkJV6vVKeeSdEd0rehJQECgWCx68+Yl/DI3931BQX6LFkE/Kt+2TUcbG9uVCX+SyeT27bvUw/b/wWCDdnhY2yuXz4cEh9nbsU6cPPT580cfn/+y4h46sqe0rEQmq0hJSZJWSEePmgQA6Nol4uChxPUbVuXl5fp4N331+rlOvKCgVmlp5y5cPMtk2Jw8dVgsFn3My9VqtQQCIaB5SxKJtHnr2j69ohVKRXS/wVOnzFm0eN7U6aOi+8Vo1Oq0y6kREZExg4fV/0NF9Ig8l5q8fcc/RdxCXx+/nJx3GXeu79uTVP01fGV6dO9z+MjeJct+GxE3jkgkHjyYaGtr1z/6px+Vh9cgZ1OSunaJMGAiUoPdHpk6ZU5QUKu/Vi9euny+j49faGjryu/S6YwjR/Ym7t5CpzNWrtjQrFkLeAxYnfBvq1ZtUs4lbd/5D5FI1E1FY0ZNDmvV9t/NazZt/js0pPWSRavLykufZj0GALi5cubM/iM//9PmLWtv3LgCAOjYoWvCyo0WZIstW9cdOJTo5OQSGBhikA9lYWGxZvWWqL4D09PT1m9Ylfn0YXS/mFoFCSSTyWtWb2nq22zb9g3/bl7j4eH5z4Zddnb21Zzi79ccANC9W29DfIKvVL1V7mFauVIOWnapzho9gW+PrFqxoW3bjvWvDQskJx/bt3/HqaTLFhYWtTpRUaE5s/njuJWNv3+rwW5nlUgkPw+PqvKtiRN+ieo7EHGLaubFi6y0y6lpl1Pjho+trcbV02Bltra23rnjSJVvMRlVLOFMgUeP7714mTVp4sxBA4cYtmajD9o4iFHNoG1+T6hw6gAuMybAZcYEuMyYAJcZE+AyYwJcZkyAy4wJcJkxQdUyEwgA1MJRAMc0IAALq6plq1pmayZJKoSMbBSOgZEKVT+K2Fe1zCwXK5kUl9nMEJYqnb2q9neoWmbnRhQSEeRnS41sGI4heZxWGt6z6qdNP7wEixrn8voe/9NrzCUuN0fUkObC7i99x7nQbKp+slxD2PQLe4qEZSqGnSWV0WCfTAddN+8AAAh/SURBVJs1FGvSl/dSMhm07mPP8fmh71jN0fHLecqyAoVU1ECi4+fl5WV
lZQ0caIreI3XAkkqycyQ7e1AIxOqWRjX3UXtHS3vHb/2ozRcJSSrMehbUeTTahiAKfnsEE+AyYwLMyUwgEPR3pm8wYFFmw/rGmgWYk1mj0YjFVef9aMBgTmYCgVDn7Y3mC+Zk1mq1DTVITTVgTmZsgjmZSSQSm133QAZmCuZkVqvVpaWlaFuBNJiTmUAgGHB7uLmAOZm1Wm1FRQXaViAN5mTGJpiTmUwmOzv/MJBPQwVzMkMQxOVWHeWoAYM5mbEJ5mQmk8kuLphLP445mSEIKioqQtsKpMGczNgEczKTyWQnJye0rUAazMkMQVBxcc0xmBsYmJMZm2BOZvwJFSbAn1DhNFgwJzPuwIsJtFqt/P+zXWEHzMlMJBIZDAbaViAN5mTG/bRxGiyYkxl/QoUJ8CdUmABfUGECfEGF02DBnMxEIpHJZKJtBdJgTmaNRiMSidC2AmkwJzOJRLKxMdE8VMYDczKr1WqpFHNBKjEnM7x0RtsEpKk5yl/DICoqCr4rosuTDv//+PFjtE1DAqz05vj4eAqFAmeNJ/w/ISGGyf9r+mBF5p9++snNza3yESaTOXToUPQsQhSsyAwrXTmGkJeXV7du3VC1CDmwJbO7uzv8v7W19ZAhBk6ea8pgSGYAwODBg0kkEtyVe/bsibY5yIE5mTkcDo1Gi4uLQ9sWRDHpBZUa0hbkyipEUIVYrVFrZVJN/et88+ZNTk5Ov3796l8VmUwgkgCNSbZmkmwdLeydTDd4oInK/DxD8P6plPtRxvaga9SAZEGyoFio1aZlKoEINCq1WqVWq9RAq1XJoSaBNJ9gunMjk3uebXIyP77Cv3+hjO3JoNlbM9jmFNlJIVWJSqRApbKy0nYaxLJ1MKGUAiYkc/57Wdp+LtOJ7uhddZYdc0FULC35UO4bwug4gIW2LV8xFZmzbgqeZ4jdWjiRLKrOi2Z2CLliuUAyZBYHbUOAqcj84q7ozWOZo09D26goLZcVveGNXe4F32RFEfRlvpta9um9ysXPAV0zjIRCqvr8tHD8ysbomoHyuvl9lvjjW0VD1RgAYEWzcG3mmLSpAF0z0JRZUKLMuilxDWjgkUBo9lQLuvX9i2Uo2oCmzDeSSq1saSgagBg2Lsznt0RSEWruDKjJXPRRJixTMx0xITMAwMHb7tZp1KIkoCZz1k0Ru4kpro9Ly/Ln/tn66fPLhq3WzpUhFmjLi5WGrVZP0JFZKdd8eiWh2ZrcTUGjoiWS816ikygZHZnzXkptnM3pRqZBoLOs32ehE7EdnazMX3LlNBbdSJXffXjq5p0jQhHP3s41OLBnl/ZxFhZWBYXZmxPHjx2x4cLlrYXcd3a2Ln17Tmvu3wk+RSLln72w4dXbWxZkqyZeoUYyjGZHERYAmQSi0pH+2tHpzdw8mQXFKDc1L6fvOp+2OahFROyAhYEB3W/cPpR0NgF+S6VSHDr+R6d2QyeP2WZn63zk5J9SqQAAoIKUO/ZNf/XmZqd2w/r2mlbOLzSGYTAKmUZcjsL1Njq9WSZRky0NL7NQVHLt1r7hMcsDm3918rJhsE+dW90/cjb8ckDfOUEtIgAAkRFTNm6Lz/34NDCg6537J4u47yfE/+vrHQ4A8HRv8fcmY/kPka1IqCQ8R0dmuVRNtjK8zO9zH6rV0OGkRYeTFv3/MS0AQCjmwS8sLajwP3a2LgAAkbgEAPDyzU0XJ29YYwAAkWjEZydkS7JUjJneTCACY9zNF4lLAQBj49bb2jhWPs6y53CLcysfIZMsAAAajRoAIBBy3VyaGtyYKiEQ4B8e0qAjsxWVpFJAFlYGbp1K/bqj1dHBU/+z6DQ7iZRvWEt+BKRU05gofOfoXIJR6SRIYfgpyqdxKwKBkPHghO6IQimr8Sw3l6b5Ba95JZ8Mbs/3QAq1NROFB+ro9GaXxhSp3PAys1nuHdoMuX3v2J5DcwL8O4vFpXceJI0dsZ7j6lfNWV07jnycdWHrnkmd2g5lMtiZz9MMbpgOCo1
It0PhO0dJZk/KsztShoPh75BE95lpa+OYcf9kds59JoPdvFkXG6Zj9aewWZzxI/9JTduUlr7L1saphX+XdzkPDG4YAKBCIAcaDY2BwneOjluBXKrev/xT086NkG8aRXi55Y2bklpFoHAnH53eTKGR3JvSpAJ5Nbe1T537+2lV4yfHxe9L0dsqT5k+PtHJ0ctQRl64svXuw1PfH7cgW6mgqhN9L5p33tLyxzfqNZBXc3TCnqDmJFSQI0s/WeYe9MOAe1KpQKGs4g5w5Q3K32DDdCSRDPbDlVYIFYoq4hpAkIpMtqjyFDtblx8tFAVFEksgixyDTt5CdHozAMDNm0pnEsSlFT9yxqbRbGk0W8TtqmSAtQ3N2mBRSko/lA+Zg5qXJ5reI50GsVVidB7MIYykRBzQlsmwq3oMQAA0ZWa5WDULo/He81C0AQFkQrm0VNy2L5qu+Sh7dvqFMV3cybwcNN3hjAqkUn/M5A6Z7Y6uGej7aQMAnt4QfHilZDU2la0ohkIuUnzMLJqQ0JhIxLw7PszjK+XZWTLXZk4EtL8RQyEplYiKhMN+9UDbEGBCMgMAPr2RXtrPdfCysfewQ9uWeiEpk5V9LG/c3LrjAFPZLmRCMsNRkB9cLH96XcBuxKSxrK3NyidQJYdEJRUAUhI0UMcBbAeOCe1qNy2ZYVQKzfMMYU6WVFCitHOjqSFAtiRbUi1MzVQCgaBSQpBCrVVDQKuR8pVNAmk+IXSOt8l5M5qizDpkUnXRB5lEAInK1ZAKVIhVaFv0P5AtiWQLgi2bbM0k2TlZOrmb7thj0jLjGApsRRLCLLjMmACXGRPgMmMCXGZMgMuMCf4P643CbzzU6cEAAAAASUVORK5CYII=", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from IPython.display import Image, display\n", + "\n", + "display(Image(graph.get_graph().draw_mermaid_png()))" + ] + }, + { + "cell_type": "markdown", + "id": "5a730a68-e031-4d3c-8f73-2fca6ebbe893", + "metadata": {}, + "source": [ + "**Trying it Out!**" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "793196f8-808a-4a87-9b33-4523cf44095f", + "metadata": {}, + "outputs": [ + { + "name": "stdin", + "output_type": "stream", + "text": [ + "User: What's my favorite food?\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI: Your favorite food is chocolate lava cakes!\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: What's my name!\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AI: Your name is Adam.\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: Lit, what's my name?\n" + ] + }, + { + "name": "stdout", + "output_type": 
"stream", + "text": [ + "\n", + "AI: Your name is Adam.\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "\n", + "User: exit\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "=== Updated Episodic Memory ===\n", + "\n", + "=== Updated Procedural Memory ===\n" + ] + } + ], + "source": [ + "output = graph.invoke({\"messages\": [\"\"]})" + ] + }, + { + "cell_type": "markdown", + "id": "4b5ba5ed-1ddb-474d-9867-b9b4b0ccf69c", + "metadata": {}, + "source": [ + "**Inspecting the Messages**" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "2a3a1260-d8f5-4aba-bdaa-5f2acc039b09", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "==================================================================================================== \n", + "\n", + "Message 1 - SYSTEM: You are a helpful AI Assistant. Answer the user's questions to the best of your ability.\n", + " You recall similar conversations with the user, here are the details:\n", + " \n", + " Current Conversation Match: HUMAN: Hello!\n", + "AI: Hello!\n", + "\n", + "HUMAN: What's my name?\n", + "AI: I do not have access to that information.\n", + "\n", + "HUMAN: My name is Adam!\n", + "AI: It's nice to meet you, Adam!\n", + "\n", + "HUMAN: What is my name?\n", + "AI: You said your name is Adam.\n", + "\n", + " Previous Conversations: HUMAN: Hello!\n", + "AI: Hello!\n", + "HUMAN: What's my favorite food?\n", + "AI: I don't have that information. 
What's your favorite food?\n", + "HUMAN: My favorite food is chocolate lava cakes!\n", + "AI: Yum, chocolate lava cakes are delicious!\n", + "HUMAN: What's my name?\n", + "AI: You said your name is Adam.\n", + " What has worked well: ['Directly asking the user for their preferences to gather necessary information.', \"Directly stating and then querying the user's name.\"]\n", + " What to avoid: ['N/A']\n", + " \n", + " Use these memories as context for your response to the user.\n", + " \n", + " Additionally, here are 10 guidelines for interactions with the current user: 1. Break down complex concepts into individual components - Enhances user understanding by simplifying intricate topics.\n", + "2. Provide specific and tailored recommendations - Increases relevance and user satisfaction by aligning suggestions with user preferences.\n", + "3. Maintain conversation context by referencing previous interactions - Ensures continuity and personalization, building user trust.\n", + "4. Clarify integration of learning and decision-making cycles before details - Ensures users understand the foundational processes before diving deeper.\n", + "5. Use simple and clear language - Avoids confusion and facilitates quicker user comprehension.\n", + "6. Ask clarifying questions when user input is ambiguous - Prevents misunderstandings and ensures accurate responses.\n", + "7. Offer step-by-step guidance for complex tasks - Aids users in navigating intricate processes, enhancing usability.\n", + "8. Be concise, focusing on key points - Keeps interactions efficient, respecting user time and attention.\n", + "9. Adapt tone and formality based on user cues - Improves user comfort and engagement by matching their communication style.\n", + "10. 
Provide positive reinforcement and encouragement - Boosts user confidence and promotes continued interaction.\n", + "==================================================================================================== \n", + "\n", + "Message 2 - HUMAN: What's my favorite food?\n", + "==================================================================================================== \n", + "\n", + "Message 3 - AI: Your favorite food is chocolate lava cakes!\n", + "==================================================================================================== \n", + "\n", + "Message 4 - HUMAN: What's my name!\n", + "==================================================================================================== \n", + "\n", + "Message 5 - AI: Your name is Adam.\n", + "==================================================================================================== \n", + "\n", + "Message 6 - HUMAN: If needed, Use this grounded context to factually answer the next question.\n", + " Let me know if you do not have enough information or context to answer a question.\n", + " \n", + " \n", + "CHUNK 1:\n", + "reasoning or retrieved from long-term memory), and other core information carried over from the previous\n", + "decision cycle (e.g., agent’s active goals). Previous methods encourage the LLM to generate intermediate\n", + "reasoning (Wei et al., 2022b; Nye et al., 2021), using the LLM’s own context as a form of working memory.\n", + "CoALA’s notion of working memory is more general: it is a data structure that persists across LLM calls.\n", + "On each LLM call, the LLM input is synthesized from a subset of working memory (e.g., a prompt template\n", + "and relevant variables). The LLM output is then parsed back into other variables (e.g., an action name\n", + "and arguments) which are stored back in working memory and used to execute the corresponding action\n", + "CHUNK 2:\n", + "∗Equal contribution, order decided by coin flip. 
Each person reserves the right to list their name first. A CoALA-based repo\n", + "of recent work on language agents: https://github.com/ysymyth/awesome-language-agents .\n", + "1arXiv:2309.02427v3 [cs.AI] 15 Mar 2024 Published in Transactions on Machine Learning Research (02/2024)\n", + "Input Output\n", + "Observations Actions\n", + "LLM\n", + "Language Agent\n", + "Observations\n", + "EnvironmentMemory\n", + "Retrieval LearningReasoning\n", + "Actions\n", + "EnvironmentCognitive Language Agent/gid00034 /gid00036\n", + "/gid00035\n", + "Figure 1: Different uses of large language models (LLMs). A: In natural language processing (NLP), an LLM\n", + "takes text as input and outputs text. B:Language agents (Ahn et al., 2022; Huang et al., 2022c) place the\n", + "CHUNK 3:\n", + "et al., 2023; Liu et al., 2023b). Integrated, multimodal reasoning may allow for more human-like behaviors: a\n", + "VLM-based agent could “see” a webpage, whereas a LLM-based agent would more likely be given raw HTML.\n", + "However, coupling the agent’s perception and reasoning systems makes the agent more domain-specific and\n", + "difficult to update. In either case, the basic architectural principles described by CoALA — internal memories,\n", + "a structured action space, and generalized decision-making — can be used to guide agent design.\n", + "Internal vs. external: what is the boundary between an agent and its environment? While\n", + "humans or robots are clearly distinct from their embodied environment, digital language agents have less\n", + "CHUNK 4:\n", + "framework, learning is a result action of a decision-making cycle just like grounding: the agent deliberately\n", + "chooses to commit information to long-term memory. This is in contrast to most agents, which simply fix a\n", + "learning schedule and only use decison making for external actions. 
Biological agents, however, do not have\n", + "this luxury: they must balance learning against external actions in their lifetime, choosing when and what to\n", + "learn (Mattar and Daw, 2018). More flexible language agents (Wang et al., 2023a; Park et al., 2023) would\n", + "follow a similar design and treat learning on par with external actions. Learning could be proposed as a\n", + "possible action during regular decision-making, allowing the agent to “defer” it until the appropriate time.\n", + "CHUNK 5:\n", + "Memory. Building on psychological theories, Soar uses several types of memory to track the agent’s\n", + "state (Atkinson and Shiffrin, 1968). Working memory (Baddeley and Hitch, 1974) reflects the agent’s current\n", + "circumstances: it stores the agent’s recent perceptual input, goals, and results from intermediate, internal\n", + "reasoning. Long term memory is divided into three distinct types. Procedural memory stores the production\n", + "system itself: the set of rules that can be applied to working memory to determine the agent’s behavior.\n", + "Semantic memory stores facts about the world (Lindes and Laird, 2016), while episodic memory stores\n", + "sequences of the agent’s past behaviors (Nuxoll and Laird, 2007).\n", + "Grounding. Soar can be instantiated in simulations (Tambe et al., 1995; Jones et al., 1999) or real-world\n", + "CHUNK 6:\n", + "S. Yao, R. Rao, M. Hausknecht, and K. Narasimhan. Keep CALM and explore: Language models for action\n", + "generation in text-based games. arXiv preprint arXiv:2010.02903 , 2020.\n", + "S. Yao, H. Chen, J. Yang, and K. Narasimhan. Webshop: Towards scalable real-world web interaction with\n", + "grounded language agents. Advances in Neural Information Processing Systems , 35:20744–20757, 2022a.\n", + "S. Yao, J. Zhao, D. Yu, N. Du, I. Shafran, K. Narasimhan, and Y. Cao. React: Synergizing reasoning and\n", + "acting in language models. arXiv preprint arXiv:2210.03629 , 2022b.\n", + "S. Yao, D. Yu, J. 
Zhao, I. Shafran, T. L. Griffiths, Y. Cao, and K. Narasimhan. Tree of thoughts: Deliberate\n", + "problem solving with large language models. arXiv preprint arXiv:2305.10601 , 2023.\n", + "CHUNK 7:\n", + "M. Hasan, C. Ozel, S. Potter, and E. Hoque. Sapien: Affective virtual agents powered by large language\n", + "models.arXiv preprint arXiv:2308.03022 , 2023.\n", + "22 Published in Transactions on Machine Learning Research (02/2024)\n", + "P. Haslum, N. Lipovetzky, D. Magazzeni, C. Muise, R. Brachman, F. Rossi, and P. Stone. An introduction to\n", + "the planning domain definition language , volume 13. Springer, 2019.\n", + "M. Hausknecht, P. Ammanabrolu, M.-A. Côté, and X. Yuan. Interactive fiction games: A colossal adventure.\n", + "InProceedings of the AAAI Conference on Artificial Intelligence , volume 34, pages 7903–7910, 2020.\n", + "S. Hong, X. Zheng, J. Chen, Y. Cheng, C. Zhang, Z. Wang, S. K. S. Yau, Z. Lin, L. Zhou, C. Ran, et al.\n", + "CHUNK 8:\n", + "B. Xu, Z. Peng, B. Lei, S. Mukherjee, Y. Liu, and D. Xu. Rewoo: Decoupling reasoning from observations\n", + "for efficient augmented language models. arXiv preprint arXiv:2305.18323 , 2023b.\n", + "B. Xu, A. Yang, J. Lin, Q. Wang, C. Zhou, Y. Zhang, and Z. Mao. ExpertPrompting: Instructing Large\n", + "Language Models to be Distinguished Experts. arXiv preprint arXiv:2305.14688 , 2023c.\n", + "J. Yang, A. Prabhakar, K. Narasimhan, and S. Yao. Intercode: Standardizing and benchmarking interactive\n", + "coding with execution feedback. arXiv preprint arXiv:2306.14898 , 2023.\n", + "S. Yao and K. Narasimhan. Language agents in the digital world: Opportunities and risks. princeton-\n", + "nlp.github.io , Jul 2023. URL https://princeton-nlp.github.io/language-agent-impact/ .\n", + "CHUNK 9:\n", + "helpful for the agent to have semantic memory containing the set of items for sale, as well as episodic\n", + "memory about each customer’s previous purchases and interactions. 
It will need procedural memory\n", + "defining functions to query these datastores, as well as working memory to track the dialogue state.\n", + "•Define the agent’s internal action space. This consists primarily of defining read and write\n", + "access to each of the agent’s memory modules. In our example, the agent should have read and write\n", + "access to episodic memory (so it can store new interactions with customers), but read-only access to\n", + "semantic and procedural memory (since it should not update the inventory or its own code).\n", + "•Define the decision-making procedure. This step specifies how reasoning and retrieval actions\n", + "CHUNK 10:\n", + "X. Chen, M. Lin, N. Schärli, and D. Zhou. Teaching large language models to self-debug. arXiv preprint\n", + "arXiv:2304.05128 , 2023b.\n", + "Y. Chen, L. Yuan, G. Cui, Z. Liu, and H. Ji. A close look into the calibration of pre-trained language models.\n", + "arXiv preprint arXiv:2211.00151 , 2022.\n", + "N. Chomsky. Three models for the description of language. IRE Transactions on information theory , 2(3):\n", + "113–124, 1956.\n", + "A. Chowdhery, S. Narang, J. Devlin, M. Bosma, G. Mishra, A. Roberts, P. Barham, H. W. Chung, C. Sutton,\n", + "S. Gehrmann, et al. Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311 ,\n", + "2022.\n", + "P. F. Christiano, J. Leike, T. Brown, M. Martic, S. Legg, and D. Amodei. Deep reinforcement learning from\n", + "human preferences. Advances in neural information processing systems , 30, 2017.\n", + "CHUNK 11:\n", + "to affect the policy. While these examples essentially employ a fixed, read-only semantic memory, language\n", + "agents may also write new knowledge obtained from LLM reasoning into semantic memory as a form of\n", + "learning (Section 4.5) to incrementally build up world knowledge from experience.\n", + "Procedural memory . 
Language agents contain two forms of procedural memory: implicitknowledge stored\n", + "in the LLM weights, and explicitknowledge written in the agent’s code. The agent’s code can be further\n", + "divided into two types: procedures that implement actions (reasoning, retrieval, grounding, and learning\n", + "procedures), and procedures that implement decision-making itself (Section 4.6). During a decision cycle, the\n", + "CHUNK 12:\n", + "Laird (2022). B: Soar’s decision procedure uses productions to select and implement actions. These actions\n", + "may beinternal (such as modifying the agent’s memory) or external (such as a motor command).\n", + "simple production system implementing a thermostat agent:\n", + "(temperature >70◦)∧(temperature <72◦)→stop\n", + "temperature <32◦→call for repairs; turn on electric heater\n", + "(temperature <70◦)∧(furnace off)→turn on furnace\n", + "(temperature >72◦)∧(furnace on)→turn off furnace\n", + "Following this work, production systems were adopted by the AI community. The resulting agents con-\n", + "tained large production systems connected to external sensors, actuators, and knowledge bases – requiring\n", + "correspondingly sophisticated control flow. AI researchers defined “cognitive architectures” that mimicked\n", + "CHUNK 13:\n", + "of the environment, which can later be queried to execute instructions.\n", + "Updating LLM parameters (procedural memory). The LLM weights represent implicit procedural\n", + "knowledge. These can be adjusted to an agent’s domain by fine-tuning during the agent’s lifetime. Such fine-\n", + "tuningcanbeaccomplishedviasupervised(Liuetal.,2023c;Zhangetal.,2023b)orimitationlearning(Hussein\n", + "et al., 2017), reinforcement learning (RL) from environment feedback (Sutton and Barto, 2018), human\n", + "feedback (RLHF; Christiano et al., 2017; Ouyang et al., 2022; Nakano et al., 2021), or AI feedback (Bai et al.,\n", + "2022; Liu et al., 2023f). 
Classic LLM self-improvement methods (Huang et al., 2022a; Zelikman et al., 2022)\n", + "use an external measure such as consistency Wang et al. (2022b) to select generations to fine-tune on. In\n", + "CHUNK 14:\n", + "Language agents move beyond pre-defined prompt chains and instead place the LLM in a feedback loop with\n", + "the external environment (Fig. 1B). These approaches first transform multimodal input into text and pass it\n", + "to the LLM. The LLM’s output is then parsed and used to determine an external action (Fig. 3C). Early\n", + "agents interfaced the LLM directly with the external environment, using it to produce high-level instructions\n", + "based on the agent’s state (Ahn et al., 2022; Huang et al., 2022c; Dasgupta et al., 2022). Later work developed\n", + "more sophisticated language agents that use the LLM to perform intermediate reasoning before selecting\n", + "an action (Yao et al., 2022b). The most recent agents incorporate sophisticated learning strategies such as\n", + "CHUNK 15:\n", + "CoALA’s decision cycle is analogous to a program’s “main” procedure (amethodwithout return values, as\n", + "8 Published in Transactions on Machine Learning Research (02/2024)\n", + "Grounding Retrieval Learning Reasoning\n", + "PlanningExternal Internal\n", + "Figure 5: Agents’ action spaces can be divided into internal memory accesses and external interactions\n", + "with the world. Reasoning andretrieval actions are used to support planning.\n", + "opposed to functions ) that runs in loops continuously, accepting new perceptual input and calling various\n", + "actionprocedures in response.\n", + "CoALA (Figure 4) is inspired by the decades of research in cognitive architectures (Section 2.3), leveraging key\n", + "concepts such as memory, grounding, learning, and decision-making. 
Yet the incorporation of an LLM leads\n", + " \n", + "==================================================================================================== \n", + "\n", + "Message 7 - HUMAN: Lit, what's my name?\n", + "==================================================================================================== \n", + "\n", + "Message 8 - AI: Your name is Adam.\n" + ] + } + ], + "source": [ + "for i in range(len(output['messages'])):\n", + " print(\"=\"*100, f\"\\n\\nMessage {i+1} - {output['messages'][i].type.upper()}: \", output['messages'][i].content)\n", + " i += 1" + ] + }, + { + "cell_type": "markdown", + "id": "98e0e7a5-59d6-4815-ab1c-d818768104bd", + "metadata": {}, + "source": [ + "---\n", + "**Check Out an Example LangSmith Trace**\n", + "\n", + "https://smith.langchain.com/public/93f5d5c8-67e8-473b-8e51-648291c2d79a/r" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/agentic-memory/langgraph/agentic_memory_langraph.py b/agentic-memory/langgraph/agentic_memory_langraph.py new file mode 100644 index 0000000..3fcaf94 --- /dev/null +++ b/agentic-memory/langgraph/agentic_memory_langraph.py @@ -0,0 +1,715 @@ +Yes, you can definitely create a system that dynamically handles different API keys and models based on a selected type (e.g., OpenAI, Mistral, Google, etc.). 
Here's how you can structure it using Pydantic, enums, and a bit of factory-like design: + +#!/usr/bin/env python3 +from langchain_openai import ChatOpenAI +from langchain_mistralai.chat_models import ChatMistralAI +from langchain_google_genai import ChatGoogleGenerativeAI +from langchain_community.chat_models import ChatOpenRouter +from langgraph.graph import StateGraph, START, END +from pydantic import BaseModel, Field, field_validator, SecretStr +from typing import List, Optional, Dict, Any, Callable, Type +from weaviate.classes.config import Property, Configure +import weaviate.classes as wvc +from pathlib import Path +import json +from enum import Enum +import weaviate +import os + +# --- Enums --- +class ModelProvider(str, Enum): + OPENAI = "openai" + MISTRAL = "mistral" + GOOGLE = "google" + ANTHROPIC = "anthropic" + OPENROUTER = "openrouter" + OLLAMA = "ollama" + # Add other providers as needed... + +class MessageType(str, Enum): + SYSTEM = "system" + USER = "user" + AI = "ai" + +# --- Pydantic Models --- + +class ApiKey(BaseModel): + provider: ModelProvider + key: SecretStr # Use SecretStr to help protect sensitive values + + @field_validator("key", mode="before") + def check_api_key(cls, value, values): + provider = values.data.get("provider") + env_key = f"{provider.upper()}_API_KEY" + api_key = os.environ.get(env_key) + + if not api_key: + raise ValueError(f"API key for {provider} not set in environment variables ({env_key})") + + return SecretStr(api_key) + +class ModelConfig(BaseModel): + provider: ModelProvider + model_name: str + api_key: ApiKey + parameters: Optional[Dict[str, Any]] = Field(default_factory=dict) + +class Message(BaseModel): + """Base message model for all communications""" + role: MessageType + content: str + type: MessageType + + @field_validator("role", mode="before") + def validate_role(cls, value): + if isinstance(value, str): + return MessageType(value) + return value + +class DocumentChunk(BaseModel): + """Represents a chunk 
of a document.""" + content: str + source: str + metadata: Optional[Dict[str, Any]] = None + +class State(BaseModel): + """Main state model for the memory agent""" + messages: List[Message] = Field(default_factory=list) + semantic_memory: str = "" + document_memory: str = "" + procedural_memory: str = "" + prior_conversations: List[str] = Field(default_factory=list) + what_worked: List[str] = Field(default_factory=list) + what_to_avoid: List[str] = Field(default_factory=list) + model_config: Optional[ModelConfig] = None + end: bool = False + +# --- LLM Provider Factory --- +def get_llm_provider(config: ModelConfig): + """Factory function to get the appropriate LLM provider.""" + if config.provider == ModelProvider.OPENAI: + return ChatOpenAI(model=config.model_name, openai_api_key=config.api_key.key.get_secret_value(), **config.parameters) + elif config.provider == ModelProvider.MISTRAL: + return ChatMistralAI(model=config.model_name, mistral_api_key=config.api_key.key.get_secret_value(), **config.parameters) + elif config.provider == ModelProvider.GOOGLE: + return ChatGoogleGenerativeAI(model=config.model_name, google_api_key=config.api_key.key.get_secret_value(), **config.parameters) + elif config.provider == ModelProvider.OPENROUTER: + return ChatOpenRouter(model=config.model_name, openrouter_api_key=config.api_key.key.get_secret_value(), **config.parameters) + # Add other providers here... 
+ else: + raise ValueError(f"Unsupported LLM provider: {config.provider}") + +# --- Helper Functions --- + +def format_conversation(messages: List[Message]) -> str: + """Format messages into a readable conversation string""" + conversation = [] + for message in messages: + conversation.append(f"{message.type.value.upper()}: {message.content}") + return "\n".join(conversation) + +def episodic_recall(query: str, vdb_client) -> Dict[str, Any]: + """Retrieve relevant episodic memory""" + episodic_memory = vdb_client.collections.get("episodic_memory") + memory = episodic_memory.query.hybrid( + query=query, + alpha=0.5, + limit=1, + ) + return memory + +def semantic_recall(query: str, vdb_client) -> str: + """Retrieve relevant semantic knowledge""" + coala_collection = vdb_client.collections.get("CoALA_Paper") + memories = coala_collection.query.hybrid( + query=query, + alpha=0.5, + limit=15, + ) + + combined_text = "" + for i, memory in enumerate(memories.objects): + combined_text += f"\nCHUNK {i + 1}:\n" + combined_text += memory.properties['chunk'].strip() + + return combined_text + +def simple_text_splitter(text: str, chunk_size: int = 1000, chunk_overlap: int = 100) -> List[str]: + """A basic text splitter for demonstration.""" + chunks = [] + for i in range(0, len(text), chunk_size - chunk_overlap): + chunks.append(text[i:i + chunk_size]) + return chunks + +def load_and_chunk_document(filepath: str) -> List[DocumentChunk]: + """Loads a document and splits it into chunks (basic implementation).""" + file_path = Path(filepath) + if not file_path.exists(): + raise FileNotFoundError(f"File not found: {filepath}") + + try: + if filepath.endswith(".txt"): + with open(filepath, "r") as f: + text = f.read() + elif filepath.endswith(".pdf"): + # Requires pypdf + import pypdf + reader = pypdf.PdfReader(filepath) + text = "" + for page in reader.pages: + text += page.extract_text() + "\n" + elif filepath.endswith(".docx"): + # Requires python-docx + import docx + doc = 
docx.Document(filepath) + text = "\n".join([paragraph.text for paragraph in doc.paragraphs]) + else: + raise ValueError("Unsupported file type") + except ImportError as e: + raise ImportError(f"Missing library for file type. {e}") + + chunks = simple_text_splitter(text) + return [DocumentChunk(content=chunk, source=filepath) for chunk in chunks] + +def ingest_document(filepath: str, vdb_client): + """Loads a document, chunks it, and adds it to Weaviate.""" + try: + chunks = load_and_chunk_document(filepath) + except (FileNotFoundError, ValueError, ImportError) as e: + print(f"Error processing document: {e}") + return + + document_chunks = vdb_client.collections.get("DocumentChunk") + + for chunk in chunks: + data_properties = { + "content": chunk.content, + "source": chunk.source, + "metadata": chunk.metadata # If available + } + document_chunks.data.insert( + properties=data_properties + ) + +def document_recall(query: str, vdb_client) -> str: + """Retrieves relevant document chunks from Weaviate.""" + document_chunks = vdb_client.collections.get("DocumentChunk") + response = document_chunks.query.hybrid( + query=query, + alpha=0.75, + limit=5, + ) + + retrieved_chunks = "" + for item in response.objects: + retrieved_chunks += item.properties['content'] + "\n---\n" + + return retrieved_chunks + +# --- Node Functions --- + +def populate_state(state: State) -> dict: + """Initialize state with first user message and model configuration""" + # Get user input + first_query = input("User: ") + first_message = Message(role=MessageType.USER, content=first_query, type=MessageType.USER) + + # Load procedural memory + with open("./langgraph/procedural_memory_lg.txt", "r") as content: + procedural_memory = content.read() + + # Get episodic memory + episodic_memory_retrieval = episodic_recall(first_query, vdb_client) + episodic_memory = episodic_memory_retrieval.objects[0].properties + + # Create system prompt + episodic_prompt = f"""You are a helpful AI Assistant. 
Answer the user's questions to the best of your ability. + You recall similar conversations with the user, here are the details: + + Current Conversation Match: {episodic_memory['conversation']} + Previous Conversations: {"N/A"} + What has worked well: {episodic_memory['what_worked']} + What to avoid: {episodic_memory['what_to_avoid']} + + Use these memories as context for your response to the user. + + Additionally, here are 10 guidelines for interactions with the current user: {procedural_memory}""" + + system_message = Message(role=MessageType.SYSTEM, content=episodic_prompt, type=MessageType.SYSTEM) + + # Get semantic memory + semantic_memory_retrieval = semantic_recall(first_query, vdb_client) + semantic_prompt = f"""If needed, Use this grounded context to factually answer the next question. + Let me know if you do not have enough information or context to answer a question. + + {semantic_memory_retrieval} + """ + semantic_message = Message(role=MessageType.USER, content=semantic_prompt, type=MessageType.USER) + + # Create initial messages + initial_messages = [system_message, semantic_message, first_message] + + # Example model configuration (replace with user input or configuration mechanism) + model_config = ModelConfig( + provider=ModelProvider.OPENAI, + model_name="gpt-4o", + api_key=ApiKey(provider=ModelProvider.OPENAI, key="YOUR-API-KEY"), # Replace "YOUR-API-KEY" with "" + parameters={"temperature": 0.7} + ) + + return { + "messages": initial_messages, + "semantic_memory": semantic_memory_retrieval, + "document_memory": "", + "prior_conversations": [episodic_memory['conversation']], + "what_worked": [episodic_memory['what_worked']], + "what_to_avoid": [episodic_memory['what_to_avoid']], + "procedural_memory": procedural_memory, + "model_config": model_config, # Add model configuration to state + "end": False + } + +def memory_agent(state: State) -> dict: + """Process messages through LLM""" + messages = state.messages + model_config = 
state.model_config + + # Get the appropriate LLM provider based on the model configuration + llm = get_llm_provider(model_config) + + # Convert messages to a format that the LLM expects + formatted_messages = [] + for msg in messages: + if msg.role == MessageType.SYSTEM: + formatted_messages.append({"role": "system", "content": msg.content}) + elif msg.role == MessageType.USER: + formatted_messages.append({"role": "user", "content": msg.content}) + else: + formatted_messages.append({"role": "assistant", "content": msg.content}) + + # Construct the input for the LLM invocation + llm_input = [] + for msg in formatted_messages: + if msg["role"] == "system": + llm_input.append(SystemMessage(content=msg["content"])) + elif msg["role"] == "user": + llm_input.append(HumanMessage(content=msg["content"])) + elif msg["role"] == "assistant": + llm_input.append(AIMessage(content=msg["content"])) + + response = llm.invoke(llm_input) + print("\nAI: ", response.content) + + # Add response to messages + messages.append(Message(role=MessageType.AI, content=response.content, type=MessageType.AI)) + + return {"messages": messages} + +def user_response(state: State) -> dict: + """Handle user input, update memory context, and optionally ingest documents""" + messages = state.messages + messages = messages[1:] + messages = messages[:-3] + messages[-2:] + + query = input("\nUser: ") + + if query == "exit": + return {"end": True} + + if query.startswith("upload:"): + filepath = query.split("upload:")[1].strip() + try: + ingest_document(filepath, vdb_client) + print(f"Document '{filepath}' ingested successfully.") + return { + "messages": state.messages, + "semantic_memory": state.semantic_memory, + "document_memory": "", + "prior_conversations": state.prior_conversations, + "what_worked": state.what_worked, + "what_to_avoid": state.what_to_avoid, + "procedural_memory": state.procedural_memory, + "model_config": state.model_config, + "end": False + } + except Exception as e: + 
print(f"Error ingesting document: {e}") + return { + "messages": state.messages, + "semantic_memory": state.semantic_memory, + "document_memory": "", + "prior_conversations": state.prior_conversations, + "what_worked": state.what_worked, + "what_to_avoid": state.what_to_avoid, + "procedural_memory": state.procedural_memory, + "model_config": state.model_config, + "end": False + } + + episodic_memory_retrieval = episodic_recall(query, vdb_client) + episodic_memory = episodic_memory_retrieval.objects[0].properties + + current_conversation = episodic_memory['conversation'] + prior_conversations = state.prior_conversations + if current_conversation not in prior_conversations: + prior_conversations.append(current_conversation) + + previous_convos = [conv for conv in prior_conversations[-4:] + if conv != current_conversation][-3:] + + state_what_worked = list(set(state.what_worked + + episodic_memory['what_worked'].split('. '))) + state_what_to_avoid = list(set(state.what_to_avoid + + episodic_memory['what_to_avoid'].split('. '))) + + episodic_prompt = f"""You are a helpful AI Assistant. Answer the user's questions to the best of your ability. + You recall similar conversations with the user, here are the details: + + Current Conversation Match: {current_conversation} + Previous Conversations: {' | '.join(previous_convos)} + What has worked well: {state_what_worked} + What to avoid: {state_what_to_avoid} + + Use these memories as context for your response to the user. + + Additionally, here are 10 guidelines for interactions with the current user: {state.procedural_memory}""" + + semantic_memory_retrieval = semantic_recall(query, vdb_client) + document_memory_retrieval = document_recall(query, vdb_client) + document_prompt = f"""If needed, use this grounded context from uploaded documents to answer the next question. + Let me know if you do not have enough information or context to answer a question. 
+ + {document_memory_retrieval} + """ + semantic_prompt = f"""If needed, Use this grounded context to factually answer the next question. + Let me know if you do not have enough information or context to answer a question. + + {semantic_memory_retrieval} + """ + + system_message = Message(role=MessageType.SYSTEM, content=episodic_prompt, type=MessageType.SYSTEM) + semantic_message = Message(role=MessageType.USER, content=semantic_prompt, type=MessageType.USER) + document_message = Message(role=MessageType.USER, content=document_prompt, type=MessageType.USER) + user_message = Message(role=MessageType.USER, content=query, type=MessageType.USER) + + final_messages = [system_message] + final_messages.extend(messages) + final_messages.append(semantic_message) + final_messages.append(document_message) + final_messages.append(user_message) + + return { + "messages": final_messages, + "semantic_memory": semantic_memory_retrieval, + "document_memory": document_memory_retrieval, + "prior_conversations": prior_conversations, + "what_worked": state_what_worked, + "what_to_avoid": state_what_to_avoid, + "procedural_memory": state.procedural_memory, + "model_config": state.model_config, + "end": False + } + +def update_memory(state: State) -> None: + """Update episodic and procedural memory""" + messages = state.messages + messages = messages[1:] + messages = messages[:-4] + messages[-2:] + + conversation = format_conversation(messages) + + reflection_template = """ + You are analyzing conversations to create memories that will help guide future interactions. + Review the conversation and create a memory reflection following these rules: + 1. For any field where you don't have enough information, use "N/A" + 2. Be extremely concise - each string should be one clear, actionable sentence + 3. Focus only on information that would be useful for future conversations + 4. 
Context_tags should be specific enough to match similar situations but general enough to be reusable + + Output valid JSON in exactly this format: + { + "context_tags": [string], + "conversation_summary": string, + "what_worked": string, + "what_to_avoid": string + } + + Here is the conversation: + {conversation} + """ + llm = get_llm_provider(state.model_config) + + # Construct the input for the LLM invocation + formatted_input = { + "conversation": conversation + } + + # Use the ChatPromptTemplate to format the input + prompt_template = ChatPromptTemplate.from_template(reflection_template) + formatted_prompt = prompt_template.format_messages(**formatted_input) + + # Invoke the LLM with the formatted prompt + response = llm.invoke(formatted_prompt) + + # Parse the response content as JSON + reflection_output = json.loads(response.content) + + episodic_memory = vdb_client.collections.get("episodic_memory") + episodic_memory.data.insert({ + "conversation": conversation, + "context_tags": reflection_output['context_tags'], + "conversation_summary": reflection_output['conversation_summary'], + "what_worked": reflection_output['what_worked'], + "what_to_avoid": reflection_output['what_to_avoid'], + }) + print("\n=== Updated Episodic Memory ===") + + with open("./langgraph/procedural_memory_lg.txt", "r") as content: + current_takeaways = content.read() + + procedural_prompt = f"""You are maintaining a continuously updated list of the most important procedural behavior instructions for an AI assistant. + Your task is to refine and improve a list of key takeaways based on new conversation feedback while maintaining the most valuable existing insights. + + CURRENT TAKEAWAYS: + {current_takeaways} + + NEW FEEDBACK: + What Worked Well: + {state.what_worked} + + What To Avoid: + {state.what_to_avoid} + + Please generate an updated list of up to 10 key takeaways that combines: + 1. The most valuable insights from the current takeaways + 2. 
New learnings from the recent feedback + 3. Any synthesized insights combining multiple learnings + + Requirements for each takeaway: + - Must be specific and actionable + - Should address a distinct aspect of behavior + - Include a clear rationale + - Written in imperative form (e.g., "Maintain conversation context by...") + + Format each takeaway as: + [#]. [Instruction] - [Brief rationale] + + Return just the list, no preamble or explanation. + """ + + llm_input = SystemMessage(content=procedural_prompt) + + procedural_memory = llm.invoke([llm_input]) + + with open("./langgraph/procedural_memory_lg.txt", "w") as content: + content.write(procedural_memory.content) + + print("\n=== Updated Procedural Memory ===") + +def check_end(state: State) -> str: + """Check if conversation should end""" + return "stop" if state.end else "continue" + +# --- Graph Setup --- + +def create_graph(): + """Create and compile the agent graph""" + graph = StateGraph(State) + + graph.add_node("populate_state", populate_state) + graph.add_node("memory_agent", memory_agent) + graph.add_node("user_response", user_response) + graph.add_node("update_memory", update_memory) + + graph.add_edge(START, "populate_state") + graph.add_edge("populate_state", "memory_agent") + graph.add_edge("memory_agent", "user_response") + graph.add_conditional_edges( + "user_response", + check_end, + { + "continue": "memory_agent", + "stop": "update_memory", + } + ) + graph.add_edge("update_memory", END) + + return graph.compile() + +# --- Main Execution --- + +if __name__ == "__main__": + # Connect to Vector Database + vdb_client = weaviate.connect_to_local() + + # Check if connection to weaviate database is made + if vdb_client.is_ready(): + print("Connected to Weaviate: ", vdb_client.is_ready()) + else: + print("Failed to connect to Weaviate.") + exit() # Exit if no connection + + # Create the necessary collections if they don't exist + if not vdb_client.collections.exists("episodic_memory"): + 
vdb_client.collections.create( + name="episodic_memory", + description="Episodic memory storage", + vectorizer_config=wvc.config.Configure.Vectorizer.text2vec_openai(), + properties=[ + Property(name="conversation", data_type=wvc.config.DataType.TEXT), + Property(name="context_tags", data_type=wvc.config.DataType.TEXT_ARRAY), + Property(name="conversation_summary", data_type=wvc.config.DataType.TEXT), + Property(name="what_worked", data_type=wvc.config.DataType.TEXT), + Property(name="what_to_avoid", data_type=wvc.config.DataType.TEXT), + ] + ) + + if not vdb_client.collections.exists("CoALA_Paper"): + vdb_client.collections.create( + name="CoALA_Paper", + description="Semantic memory storage based on CoALA paper content", + vectorizer_config=wvc.config.Configure.Vectorizer.text2vec_openai(), + properties=[ + Property(name="chunk", data_type=wvc.config.DataType.TEXT), + ] + ) + + if not vdb_client.collections.exists("DocumentChunk"): + vdb_client.collections.create( + name="DocumentChunk", + description="Chunks of text extracted from documents", + vectorizer_config=wvc.config.Configure.Vectorizer.text2vec_openai(), + properties=[ + Property(name="content", data_type=wvc.config.DataType.TEXT), + Property(name="source", data_type=wvc.config.DataType.TEXT), + Property(name="metadata", data_type=wvc.config.DataType.OBJECT), + ] + ) + + graph = create_graph() + graph.invoke(State()) + +''' +content_copy +Use code with caution. +Python + +Key Changes and Explanations: + +ModelProvider Enum: + +Defines the supported LLM providers (OpenAI, Mistral, Google, etc.). + +Makes it easy to add new providers in the future. + +ApiKey Model: + +Represents an API key using Pydantic. + +Uses SecretStr to handle sensitive API keys more securely. + +Includes a field_validator to ensure that each ApiKey instance contains a provider attribute. + +The validator also dynamically fetches the corresponding environment variable based on the provider to set the API key value securely. 
+ +ModelConfig Model: + +provider: Specifies the LLM provider (from the ModelProvider enum). + +model_name: The specific model to use (e.g., "gpt-4o", "text-bison@001"). + +api_key: An instance of the ApiKey model. + +parameters: An optional dictionary for provider-specific parameters (e.g., temperature, max_tokens). + +get_llm_provider Factory: + +Takes a ModelConfig as input. + +Uses a conditional structure (if/elif/else) to instantiate the correct LLM provider class based on config.provider. + +Passes the model_name, api_key, and parameters to the provider's constructor. + +Raises a ValueError if an unsupported provider is specified. + +populate_state Modification: + +Added logic to get model configuration (you'll need a mechanism for the user to provide this or have it stored/loaded). + +Added a placeholder for creating a ModelConfig instance. + +The model_config is added to the initial state. + +memory_agent Modification: + +Retrieves the model_config from the state. + +Uses get_llm_provider(model_config) to get the LLM instance. + +The rest of the logic remains similar, but now it uses the dynamically created LLM provider. + +ApiKey Field Validator: + +The check_api_key validator now also checks if the corresponding environment variable is set and raises a ValueError if it's missing. + +This ensures that the API key is correctly set before the agent starts. + +How to Use: + +Install Dependencies: + +pip install langchain langchain-openai pydantic weaviate-client python-dotenv docx pypdf +content_copy +Use code with caution. +Bash + +Set Environment Variables: + +Set the appropriate environment variables for your API keys: + +export OPENAI_API_KEY="your_openai_key" +export MISTRAL_API_KEY="your_mistral_key" +export GOOGLE_API_KEY="your_google_key" +# ...and so on +content_copy +Use code with caution. +Bash + +Run the Script: + +python your_script_name.py +content_copy +Use code with caution. 
+Bash + +Interact: + +The agent will now use the model and API key specified in the ModelConfig. + +Advantages of This Approach: + +Flexibility: Easily switch between different LLM providers by modifying the ModelConfig. + +Extensibility: Add new providers to the ModelProvider enum and the get_llm_provider factory without major code changes. + +Organization: Pydantic models provide a clear and structured way to manage configurations and data. + +Security: SecretStr helps to protect sensitive API keys. + +Maintainability: The code is more modular and easier to maintain. + +Further Improvements: + +Configuration Input: Implement a mechanism for users to provide the ModelConfig either through command-line arguments, a configuration file, or a user interface. + +Dynamic Loading: Instead of hardcoding the providers in get_llm_provider, you could use a plugin-like architecture to dynamically load provider modules based on the configuration. + +Asynchronous Operations: Consider using asynchronous operations for LLM calls and Weaviate interactions to improve performance, especially when dealing with multiple providers or large documents. + +Advanced Prompting: As you move away from LangChain's prompts, you might need to implement more sophisticated prompt engineering logic yourself, potentially using a templating engine if the prompts become complex. + +Error Handling: Implement robust error handling throughout the code to gracefully handle API errors, invalid configurations, missing dependencies, etc. + +This updated code provides a more Pydantic-centric and flexible foundation for your agentic system, making it adaptable to different LLMs and API keys while maintaining a well-structured and organized codebase. 
+ +''' \ No newline at end of file diff --git a/agentic-memory/langgraph/procedural_memory_lg.txt b/agentic-memory/langgraph/procedural_memory_lg.txt new file mode 100644 index 0000000..2a3c481 --- /dev/null +++ b/agentic-memory/langgraph/procedural_memory_lg.txt @@ -0,0 +1,10 @@ +1. Maintain conversation context by recalling past interactions - Builds continuity and demonstrates understanding of the user's needs. +2. Use clear and concise language to convey information - Enhances understanding and avoids confusion. +3. Offer structured breakdowns for complex topics and explain their learning mechanisms - Facilitates comprehension and highlights key roles and functions. +4. Ask clarifying questions when user requests are ambiguous - Ensures accurate assistance and reduces misunderstandings. +5. Provide step-by-step guidance for complex tasks - Facilitates user comprehension and successful task completion. +6. Acknowledge user emotions and respond empathetically - Builds trust and rapport with the user. +7. Confirm and repeat the user's name to acknowledge recognition - Reinforces a personal connection and shows attentiveness. +8. Offer alternative solutions when initial suggestions are not feasible - Demonstrates flexibility and commitment to user satisfaction. +9. Provide specific suggestions tailored to the user's stated preferences - Shows attentiveness to user needs and enhances satisfaction. +10. Continuously learn from user feedback to improve response quality - Enhances overall effectiveness and user experience. \ No newline at end of file diff --git a/agentic-memory/procedural_memory.txt b/agentic-memory/procedural_memory.txt new file mode 100644 index 0000000..0526b32 --- /dev/null +++ b/agentic-memory/procedural_memory.txt @@ -0,0 +1,19 @@ +1. Confirm and repeat the user's name throughout the conversation - Personalizes the interaction and enhances user engagement by acknowledging recognition. + +2. 
Maintain conversation context by recalling previous interactions - Builds rapport and shows attention to user preferences over time. + +3. Ask for and confirm user preferences before making suggestions - Avoids assumptions and ensures recommendations are personalized and relevant. + +4. Provide clear summaries and use concise language with examples when explaining complex topics - Facilitates understanding and ensures instructions are easily actionable. + +5. Respect user choices and offer alternatives if initial suggestions don't resonate - Demonstrates flexibility and commitment to user satisfaction. + +6. Verify user interest before providing additional information - Prevents overwhelming users and ensures engagement by checking their interest first. + +7. Promptly ask users to provide any missing personal information - Completes the conversation and enhances personalization, increasing user satisfaction. + +8. Maintain a friendly and helpful tone throughout interactions - Fosters a positive user experience and promotes ongoing engagement. + +9. Acknowledge and confirm the receipt of user feedback - Reinforces user trust and demonstrates commitment to continuous improvement. + +10. Encourage and incorporate user feedback promptly into service improvements - Engages users and ensures a responsive interaction experience. 
\ No newline at end of file diff --git a/package-lock.json b/package-lock.json index 287ea41..673ac27 100644 --- a/package-lock.json +++ b/package-lock.json @@ -7,6 +7,7 @@ "": { "name": "sota-swe", "version": "0.0.11", + "hasInstallScript": true, "dependencies": { "@google-cloud/storage": "^7.0.1", "@radix-ui/react-alert-dialog": "^1.1.2", @@ -91,6 +92,7 @@ "typescript": "^5.6.3" }, "engines": { + "python": ">=3.9.0", "vscode": "^1.84.0" } }, @@ -134,9 +136,9 @@ } }, "node_modules/@babel/compat-data": { - "version": "7.26.3", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.26.3.tgz", - "integrity": "sha512-nHIxvKPniQXpmQLb0vhY3VaFb3S0YrTAwpOWJZh1wn3oJPjJk9Asva204PsBdmAE8vpzfHudT8DB0scYvy9q0g==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.26.2.tgz", + "integrity": "sha512-Z0WgzSEa+aUcdiJuCIqgujCshpMWgUpgOxXotrYPSA53hA3qopNaqcJpyr0hVb1FeWdnqFA35/fUtXgBK8srQg==", "dev": true, "engines": { "node": ">=6.9.0" @@ -173,13 +175,13 @@ } }, "node_modules/@babel/generator": { - "version": "7.26.3", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.3.tgz", - "integrity": "sha512-6FF/urZvD0sTeO7k6/B15pMLC4CHUv1426lzr3N01aHJTl046uCAh9LXW/fzeXXjPNCJ6iABW5XaWOsIZB93aQ==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.2.tgz", + "integrity": "sha512-zevQbhbau95nkoxSq3f/DC/SC+EEOUZd3DYqfSkMhY2/wfSeaHV1Ew4vk8e+x8lja31IbyuUa2uQ3JONqKbysw==", "dev": true, "dependencies": { - "@babel/parser": "^7.26.3", - "@babel/types": "^7.26.3", + "@babel/parser": "^7.26.2", + "@babel/types": "^7.26.0", "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.25", "jsesc": "^3.0.2" @@ -248,6 +250,7 @@ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.25.9.tgz", "integrity": "sha512-kSMlyUVdWe25rEsRGviIgOWnoT/nfABVWlqt9N19/dIPWViAOW2s9wznP5tURbs/IDuNk4gPy3YdYRgH3uxhBw==", "dev": true, 
+ "license": "MIT", "engines": { "node": ">=6.9.0" } @@ -293,12 +296,12 @@ } }, "node_modules/@babel/parser": { - "version": "7.26.3", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.3.tgz", - "integrity": "sha512-WJ/CvmY8Mea8iDXo6a7RK2wbmJITT5fN3BEkRuFlxVyNx8jOKIIhmC4fSkTcPcf8JyavbBwIe6OpiCOBXt/IcA==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.2.tgz", + "integrity": "sha512-DWMCZH9WA4Maitz2q21SRKHo9QXZxkDsbNZoVD62gusNtNBBqDg9i7uOhASfTfIGNzW+O+r7+jAlM8dwphcJKQ==", "dev": true, "dependencies": { - "@babel/types": "^7.26.3" + "@babel/types": "^7.26.0" }, "bin": { "parser": "bin/babel-parser.js" @@ -312,6 +315,7 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.26.0.tgz", "integrity": "sha512-B+O2DnPc0iG+YXFqOxv2WNuNU97ToWjOomUQ78DouOENWUaM5sVrmet9mcomUGQFwpJd//gvUagXBSdzO1fRKg==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.25.9" }, @@ -327,6 +331,7 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-flow-strip-types/-/plugin-transform-flow-strip-types-7.25.9.tgz", "integrity": "sha512-/VVukELzPDdci7UUsWQaSkhgnjIWXnIyRpM02ldxaVoFK96c41So8JcKT3m0gYjyv7j5FNPGS5vfELrWalkbDA==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.25.9", "@babel/plugin-syntax-flow": "^7.25.9" @@ -343,6 +348,7 @@ "resolved": "https://registry.npmjs.org/@babel/preset-flow/-/preset-flow-7.25.9.tgz", "integrity": "sha512-EASHsAhE+SSlEzJ4bzfusnXSHiU+JfAYzj+jbw2vgQKgq5HrUr8qs+vgtiEL5dOH6sEweI+PNt2D7AqrDSHyqQ==", "dev": true, + "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.25.9", "@babel/helper-validator-option": "^7.25.9", @@ -370,16 +376,16 @@ } }, "node_modules/@babel/traverse": { - "version": "7.26.4", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.26.4.tgz", - "integrity": 
"sha512-fH+b7Y4p3yqvApJALCPJcwb0/XaOSgtK4pzV6WVjPR5GLFQBRI7pfoX2V2iM48NXvX07NUxxm1Vw98YjqTcU5w==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.25.9.tgz", + "integrity": "sha512-ZCuvfwOwlz/bawvAuvcj8rrithP2/N55Tzz342AkTvq4qaWbGfmCk/tKhNaV2cthijKrPAA8SRJV5WWe7IBMJw==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.26.2", - "@babel/generator": "^7.26.3", - "@babel/parser": "^7.26.3", + "@babel/code-frame": "^7.25.9", + "@babel/generator": "^7.25.9", + "@babel/parser": "^7.25.9", "@babel/template": "^7.25.9", - "@babel/types": "^7.26.3", + "@babel/types": "^7.25.9", "debug": "^4.3.1", "globals": "^11.1.0" }, @@ -388,9 +394,9 @@ } }, "node_modules/@babel/types": { - "version": "7.26.3", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.3.tgz", - "integrity": "sha512-vN5p+1kl59GVKMvTHt55NzzmYVxprfJD+ql7U9NFIfKCBkYE55LYtS+WtPlaYOyzydrKI8Nezd+aZextrd+FMA==", + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.0.tgz", + "integrity": "sha512-Z/yiTPj+lDVnF7lWeKCIJzaIkI0vYO87dMpZ4bg4TDrFe4XXLFWL1TbXU27gBP3QccxV9mZICCrnjnYlJjXHOA==", "dev": true, "dependencies": { "@babel/helper-string-parser": "^7.25.9", @@ -424,262 +430,6 @@ "kuler": "^2.0.0" } }, - "node_modules/@esbuild/aix-ppc64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.23.1.tgz", - "integrity": "sha512-6VhYk1diRqrhBAqpJEdjASR/+WVRtfjpqKuNw11cLiaWpAT/Uu+nokB+UJnevzy/P9C/ty6AOe0dwueMrGh/iQ==", - "cpu": [ - "ppc64" - ], - "dev": true, - "optional": true, - "os": [ - "aix" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-arm": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.23.1.tgz", - "integrity": "sha512-uz6/tEy2IFm9RYOyvKl88zdzZfwEfKZmnX9Cj1BHjeSGNuGLuMD1kR8y5bteYmwqKm1tj8m4cb/aKEorr6fHWQ==", - "cpu": [ - "arm" - ], - "dev": true, - "optional": true, - "os": [ - 
"android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-arm64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.23.1.tgz", - "integrity": "sha512-xw50ipykXcLstLeWH7WRdQuysJqejuAGPd30vd1i5zSyKK3WE+ijzHmLKxdiCMtH1pHz78rOg0BKSYOSB/2Khw==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-x64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.23.1.tgz", - "integrity": "sha512-nlN9B69St9BwUoB+jkyU090bru8L0NA3yFvAd7k8dNsVH8bi9a8cUAUSEcEEgTp2z3dbEDGJGfP6VUnkQnlReg==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/darwin-arm64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.23.1.tgz", - "integrity": "sha512-YsS2e3Wtgnw7Wq53XXBLcV6JhRsEq8hkfg91ESVadIrzr9wO6jJDMZnCQbHm1Guc5t/CdDiFSSfWP58FNuvT3Q==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/darwin-x64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.23.1.tgz", - "integrity": "sha512-aClqdgTDVPSEGgoCS8QDG37Gu8yc9lTHNAQlsztQ6ENetKEO//b8y31MMu2ZaPbn4kVsIABzVLXYLhCGekGDqw==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/freebsd-arm64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.23.1.tgz", - "integrity": "sha512-h1k6yS8/pN/NHlMl5+v4XPfikhJulk4G+tKGFIOwURBSFzE8bixw1ebjluLOjfwtLqY0kewfjLSrO6tN2MgIhA==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - 
"freebsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/freebsd-x64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.23.1.tgz", - "integrity": "sha512-lK1eJeyk1ZX8UklqFd/3A60UuZ/6UVfGT2LuGo3Wp4/z7eRTRYY+0xOu2kpClP+vMTi9wKOfXi2vjUpO1Ro76g==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-arm": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.23.1.tgz", - "integrity": "sha512-CXXkzgn+dXAPs3WBwE+Kvnrf4WECwBdfjfeYHpMeVxWE0EceB6vhWGShs6wi0IYEqMSIzdOF1XjQ/Mkm5d7ZdQ==", - "cpu": [ - "arm" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-arm64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.23.1.tgz", - "integrity": "sha512-/93bf2yxencYDnItMYV/v116zff6UyTjo4EtEQjUBeGiVpMmffDNUyD9UN2zV+V3LRV3/on4xdZ26NKzn6754g==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-ia32": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.23.1.tgz", - "integrity": "sha512-VTN4EuOHwXEkXzX5nTvVY4s7E/Krz7COC8xkftbbKRYAl96vPiUssGkeMELQMOnLOJ8k3BY1+ZY52tttZnHcXQ==", - "cpu": [ - "ia32" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-loong64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.23.1.tgz", - "integrity": "sha512-Vx09LzEoBa5zDnieH8LSMRToj7ir/Jeq0Gu6qJ/1GcBq9GkfoEAoXvLiW1U9J1qE/Y/Oyaq33w5p2ZWrNNHNEw==", - "cpu": [ - "loong64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": 
{ - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-mips64el": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.23.1.tgz", - "integrity": "sha512-nrFzzMQ7W4WRLNUOU5dlWAqa6yVeI0P78WKGUo7lg2HShq/yx+UYkeNSE0SSfSure0SqgnsxPvmAUu/vu0E+3Q==", - "cpu": [ - "mips64el" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-ppc64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.23.1.tgz", - "integrity": "sha512-dKN8fgVqd0vUIjxuJI6P/9SSSe/mB9rvA98CSH2sJnlZ/OCZWO1DJvxj8jvKTfYUdGfcq2dDxoKaC6bHuTlgcw==", - "cpu": [ - "ppc64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-riscv64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.23.1.tgz", - "integrity": "sha512-5AV4Pzp80fhHL83JM6LoA6pTQVWgB1HovMBsLQ9OZWLDqVY8MVobBXNSmAJi//Csh6tcY7e7Lny2Hg1tElMjIA==", - "cpu": [ - "riscv64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-s390x": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.23.1.tgz", - "integrity": "sha512-9ygs73tuFCe6f6m/Tb+9LtYxWR4c9yg7zjt2cYkjDbDpV/xVn+68cQxMXCjUpYwEkze2RcU/rMnfIXNRFmSoDw==", - "cpu": [ - "s390x" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, "node_modules/@esbuild/linux-x64": { "version": "0.23.1", "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.23.1.tgz", @@ -696,118 +446,6 @@ "node": ">=18" } }, - "node_modules/@esbuild/netbsd-x64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.23.1.tgz", - "integrity": 
"sha512-aevEkCNu7KlPRpYLjwmdcuNz6bDFiE7Z8XC4CPqExjTvrHugh28QzUXVOZtiYghciKUacNktqxdpymplil1beA==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/openbsd-arm64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.23.1.tgz", - "integrity": "sha512-3x37szhLexNA4bXhLrCC/LImN/YtWis6WXr1VESlfVtVeoFJBRINPJ3f0a/6LV8zpikqoUg4hyXw0sFBt5Cr+Q==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/openbsd-x64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.23.1.tgz", - "integrity": "sha512-aY2gMmKmPhxfU+0EdnN+XNtGbjfQgwZj43k8G3fyrDM/UdZww6xrWxmDkuz2eCZchqVeABjV5BpildOrUbBTqA==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/sunos-x64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.23.1.tgz", - "integrity": "sha512-RBRT2gqEl0IKQABT4XTj78tpk9v7ehp+mazn2HbUeZl1YMdaGAQqhapjGTCe7uw7y0frDi4gS0uHzhvpFuI1sA==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "sunos" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-arm64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.23.1.tgz", - "integrity": "sha512-4O+gPR5rEBe2FpKOVyiJ7wNDPA8nGzDuJ6gN4okSA1gEOYZ67N8JPk58tkWtdtPeLz7lBnY6I5L3jdsr3S+A6A==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-ia32": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.23.1.tgz", - "integrity": 
"sha512-BcaL0Vn6QwCwre3Y717nVHZbAa4UBEigzFm6VdsVdT/MbZ38xoj1X9HPkZhbmaBGUD1W8vxAfffbDe8bA6AKnQ==", - "cpu": [ - "ia32" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-x64": { - "version": "0.23.1", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.23.1.tgz", - "integrity": "sha512-BHpFFeslkWrXWyUPnbKm+xYYVYruCinGcftSBaa8zoF9hZO4BcSCFUvHVTtzpIY6YzUnYtuEhZ+C9iEXjxnasg==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, "node_modules/@eslint-community/eslint-utils": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.1.tgz", @@ -836,12 +474,12 @@ } }, "node_modules/@eslint/config-array": { - "version": "0.19.1", - "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.19.1.tgz", - "integrity": "sha512-fo6Mtm5mWyKjA/Chy1BYTdn5mGJoDNjC7C64ug20ADsRDGrA85bN3uK3MaKbeRkRuuIEAR5N33Jr1pbm411/PA==", + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.19.0.tgz", + "integrity": "sha512-zdHg2FPIFNKPdcHWtiNT+jEFCHYVplAXRDlQDyqy0zGx/q2parwh7brGJSiTxRk/TSMkbM//zt/f5CHgyTyaSQ==", "dev": true, "dependencies": { - "@eslint/object-schema": "^2.1.5", + "@eslint/object-schema": "^2.1.4", "debug": "^4.3.1", "minimatch": "^3.1.2" }, @@ -872,13 +510,10 @@ } }, "node_modules/@eslint/core": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.9.1.tgz", - "integrity": "sha512-GuUdqkyyzQI5RMIWkHhvTWLCyLo1jNK3vzkSyaExH5kHPDHcuL2VOpHjmMY+y3+NC69qAKToBqldTBgYeLSr9Q==", + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.9.0.tgz", + "integrity": "sha512-7ATR9F0e4W85D/0w7cU0SNj7qkAexMG+bAHEZOjo9akvGuhHE2m7umzWzfnpa0XAg5Kxc1BWmtPMV67jJ+9VUg==", "dev": true, - "dependencies": { - "@types/json-schema": "^7.0.15" - }, 
"engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } @@ -941,27 +576,27 @@ } }, "node_modules/@eslint/js": { - "version": "9.16.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.16.0.tgz", - "integrity": "sha512-tw2HxzQkrbeuvyj1tG2Yqq+0H9wGoI2IMk4EOsQeX+vmd75FtJAzf+gTA69WF+baUKRYQ3x2kbLE08js5OsTVg==", + "version": "9.15.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.15.0.tgz", + "integrity": "sha512-tMTqrY+EzbXmKJR5ToI8lxu7jaN5EdmrBFJpQk5JmSlyLsx6o4t27r883K5xsLuCYCpfKBCGswMSWXsM+jB7lg==", "dev": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, "node_modules/@eslint/object-schema": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.5.tgz", - "integrity": "sha512-o0bhxnL89h5Bae5T318nFoFzGy+YE5i/gGkoPAgkmTVdRKTiv3p8JHevPiPaMwoloKfEiiaHlawCqaZMqRm+XQ==", + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.4.tgz", + "integrity": "sha512-BsWiH1yFGjXXS2yvrf5LyuoSIIbPrGUWob917o+BTKuZ7qJdxX8aJLRxs1fS9n6r7vESrq1OUqb68dANcFXuQQ==", "dev": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, "node_modules/@eslint/plugin-kit": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.4.tgz", - "integrity": "sha512-zSkKow6H5Kdm0ZUQUB2kV5JIXqoG0+uH5YADhaEHswm664N9Db8dXSi0nMJpacpMf+MyyglF1vnZohpEg5yUtg==", + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.3.tgz", + "integrity": "sha512-2b/g5hRmpbb1o4GnTZax9N9m0FXzz9OV42ZzI4rDDMDuHUqigAiQCEWChBWCY4ztAGVRjoWT19v0yMmc5/L5kA==", "dev": true, "dependencies": { "levn": "^0.4.1" @@ -1153,9 +788,9 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.8", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", - "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + 
"version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", "dev": true, "dependencies": { "@jridgewell/set-array": "^1.2.1", @@ -1237,195 +872,35 @@ "resolved": "https://registry.npmjs.org/@parcel/watcher/-/watcher-2.5.0.tgz", "integrity": "sha512-i0GV1yJnm2n3Yq1qw6QrUrd/LI9bE8WEBOTtOkpCXHHdyN3TAGgqAK/DAT05z4fq2x04cARXt2pDmjWjL92iTQ==", "dev": true, - "hasInstallScript": true, - "optional": true, - "dependencies": { - "detect-libc": "^1.0.3", - "is-glob": "^4.0.3", - "micromatch": "^4.0.5", - "node-addon-api": "^7.0.0" - }, - "engines": { - "node": ">= 10.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - }, - "optionalDependencies": { - "@parcel/watcher-android-arm64": "2.5.0", - "@parcel/watcher-darwin-arm64": "2.5.0", - "@parcel/watcher-darwin-x64": "2.5.0", - "@parcel/watcher-freebsd-x64": "2.5.0", - "@parcel/watcher-linux-arm-glibc": "2.5.0", - "@parcel/watcher-linux-arm-musl": "2.5.0", - "@parcel/watcher-linux-arm64-glibc": "2.5.0", - "@parcel/watcher-linux-arm64-musl": "2.5.0", - "@parcel/watcher-linux-x64-glibc": "2.5.0", - "@parcel/watcher-linux-x64-musl": "2.5.0", - "@parcel/watcher-win32-arm64": "2.5.0", - "@parcel/watcher-win32-ia32": "2.5.0", - "@parcel/watcher-win32-x64": "2.5.0" - } - }, - "node_modules/@parcel/watcher-android-arm64": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.5.0.tgz", - "integrity": "sha512-qlX4eS28bUcQCdribHkg/herLe+0A9RyYC+mm2PXpncit8z5b3nSqGVzMNR3CmtAOgRutiZ02eIJJgP/b1iEFQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">= 10.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - 
"node_modules/@parcel/watcher-darwin-arm64": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.0.tgz", - "integrity": "sha512-hyZ3TANnzGfLpRA2s/4U1kbw2ZI4qGxaRJbBH2DCSREFfubMswheh8TeiC1sGZ3z2jUf3s37P0BBlrD3sjVTUw==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">= 10.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/@parcel/watcher-darwin-x64": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.5.0.tgz", - "integrity": "sha512-9rhlwd78saKf18fT869/poydQK8YqlU26TMiNg7AIu7eBp9adqbJZqmdFOsbZ5cnLp5XvRo9wcFmNHgHdWaGYA==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">= 10.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/@parcel/watcher-freebsd-x64": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.5.0.tgz", - "integrity": "sha512-syvfhZzyM8kErg3VF0xpV8dixJ+RzbUaaGaeb7uDuz0D3FK97/mZ5AJQ3XNnDsXX7KkFNtyQyFrXZzQIcN49Tw==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">= 10.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/@parcel/watcher-linux-arm-glibc": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.5.0.tgz", - "integrity": "sha512-0VQY1K35DQET3dVYWpOaPFecqOT9dbuCfzjxoQyif1Wc574t3kOSkKevULddcR9znz1TcklCE7Ht6NIxjvTqLA==", - "cpu": [ - "arm" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10.0.0" - }, - "funding": { - "type": 
"opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/@parcel/watcher-linux-arm-musl": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-musl/-/watcher-linux-arm-musl-2.5.0.tgz", - "integrity": "sha512-6uHywSIzz8+vi2lAzFeltnYbdHsDm3iIB57d4g5oaB9vKwjb6N6dRIgZMujw4nm5r6v9/BQH0noq6DzHrqr2pA==", - "cpu": [ - "arm" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/@parcel/watcher-linux-arm64-glibc": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.5.0.tgz", - "integrity": "sha512-BfNjXwZKxBy4WibDb/LDCriWSKLz+jJRL3cM/DllnHH5QUyoiUNEp3GmL80ZqxeumoADfCCP19+qiYiC8gUBjA==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">= 10.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/@parcel/watcher-linux-arm64-musl": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.5.0.tgz", - "integrity": "sha512-S1qARKOphxfiBEkwLUbHjCY9BWPdWnW9j7f7Hb2jPplu8UZ3nes7zpPOW9bkLbHRvWM0WDTsjdOTUgW0xLBN1Q==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], + "hasInstallScript": true, + "optional": true, + "dependencies": { + "detect-libc": "^1.0.3", + "is-glob": "^4.0.3", + "micromatch": "^4.0.5", + "node-addon-api": "^7.0.0" + }, "engines": { "node": ">= 10.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "@parcel/watcher-android-arm64": "2.5.0", + "@parcel/watcher-darwin-arm64": "2.5.0", + "@parcel/watcher-darwin-x64": "2.5.0", + 
"@parcel/watcher-freebsd-x64": "2.5.0", + "@parcel/watcher-linux-arm-glibc": "2.5.0", + "@parcel/watcher-linux-arm-musl": "2.5.0", + "@parcel/watcher-linux-arm64-glibc": "2.5.0", + "@parcel/watcher-linux-arm64-musl": "2.5.0", + "@parcel/watcher-linux-x64-glibc": "2.5.0", + "@parcel/watcher-linux-x64-musl": "2.5.0", + "@parcel/watcher-win32-arm64": "2.5.0", + "@parcel/watcher-win32-ia32": "2.5.0", + "@parcel/watcher-win32-x64": "2.5.0" } }, "node_modules/@parcel/watcher-linux-x64-glibc": { @@ -1468,66 +943,6 @@ "url": "https://opencollective.com/parcel" } }, - "node_modules/@parcel/watcher-win32-arm64": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.5.0.tgz", - "integrity": "sha512-twtft1d+JRNkM5YbmexfcH/N4znDtjgysFaV9zvZmmJezQsKpkfLYJ+JFV3uygugK6AtIM2oADPkB2AdhBrNig==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/@parcel/watcher-win32-ia32": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.5.0.tgz", - "integrity": "sha512-+rgpsNRKwo8A53elqbbHXdOMtY/tAtTzManTWShB5Kk54N8Q9mzNWV7tV+IbGueCbcj826MfWGU3mprWtuf1TA==", - "cpu": [ - "ia32" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, - "node_modules/@parcel/watcher-win32-x64": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.5.0.tgz", - "integrity": "sha512-lPrxve92zEHdgeff3aiu4gDOIt4u7sJYha6wbdEZDCDUhtjTsOMiaJzG5lMY4GkWH8p0fMmO2Ppq5G5XXG+DQw==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10.0.0" - }, - 
"funding": { - "type": "opencollective", - "url": "https://opencollective.com/parcel" - } - }, "node_modules/@pkgjs/parseargs": { "version": "0.11.0", "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", @@ -2155,59 +1570,65 @@ } }, "node_modules/@shikijs/core": { - "version": "1.24.2", - "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-1.24.2.tgz", - "integrity": "sha512-BpbNUSKIwbKrRRA+BQj0BEWSw+8kOPKDJevWeSE/xIqGX7K0xrCZQ9kK0nnEQyrzsUoka1l81ZtJ2mGaCA32HQ==", - "dependencies": { - "@shikijs/engine-javascript": "1.24.2", - "@shikijs/engine-oniguruma": "1.24.2", - "@shikijs/types": "1.24.2", + "version": "1.23.1", + "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-1.23.1.tgz", + "integrity": "sha512-NuOVgwcHgVC6jBVH5V7iblziw6iQbWWHrj5IlZI3Fqu2yx9awH7OIQkXIcsHsUmY19ckwSgUMgrqExEyP5A0TA==", + "license": "MIT", + "dependencies": { + "@shikijs/engine-javascript": "1.23.1", + "@shikijs/engine-oniguruma": "1.23.1", + "@shikijs/types": "1.23.1", "@shikijs/vscode-textmate": "^9.3.0", "@types/hast": "^3.0.4", "hast-util-to-html": "^9.0.3" } }, "node_modules/@shikijs/engine-javascript": { - "version": "1.24.2", - "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-1.24.2.tgz", - "integrity": "sha512-EqsmYBJdLEwEiO4H+oExz34a5GhhnVp+jH9Q/XjPjmBPc6TE/x4/gD0X3i0EbkKKNqXYHHJTJUpOLRQNkEzS9Q==", + "version": "1.23.1", + "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-1.23.1.tgz", + "integrity": "sha512-i/LdEwT5k3FVu07SiApRFwRcSJs5QM9+tod5vYCPig1Ywi8GR30zcujbxGQFJHwYD7A5BUqagi8o5KS+LEVgBg==", + "license": "MIT", "dependencies": { - "@shikijs/types": "1.24.2", + "@shikijs/types": "1.23.1", "@shikijs/vscode-textmate": "^9.3.0", - "oniguruma-to-es": "0.7.0" + "oniguruma-to-es": "0.4.1" } }, "node_modules/@shikijs/engine-oniguruma": { - "version": "1.24.2", - "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-1.24.2.tgz", - 
"integrity": "sha512-ZN6k//aDNWRJs1uKB12pturKHh7GejKugowOFGAuG7TxDRLod1Bd5JhpOikOiFqPmKjKEPtEA6mRCf7q3ulDyQ==", + "version": "1.23.1", + "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-1.23.1.tgz", + "integrity": "sha512-KQ+lgeJJ5m2ISbUZudLR1qHeH3MnSs2mjFg7bnencgs5jDVPeJ2NVDJ3N5ZHbcTsOIh0qIueyAJnwg7lg7kwXQ==", + "license": "MIT", "dependencies": { - "@shikijs/types": "1.24.2", + "@shikijs/types": "1.23.1", "@shikijs/vscode-textmate": "^9.3.0" } }, "node_modules/@shikijs/markdown-it": { - "version": "1.24.2", - "resolved": "https://registry.npmjs.org/@shikijs/markdown-it/-/markdown-it-1.24.2.tgz", - "integrity": "sha512-vLFRZYudSkrWWrtfBBZy7hM5mZjpC54zdxSNDn25nV6uVSilySmbdt70LyfiuTOtrKQ3p7fjuxojxqM/n6qVCg==", + "version": "1.23.1", + "resolved": "https://registry.npmjs.org/@shikijs/markdown-it/-/markdown-it-1.23.1.tgz", + "integrity": "sha512-Odpj0AiQBe4v6D+XwAQkdErxncVnaBt+nZTc2JDrwWrOjvkM5JfRG55n9idTqGZfO0EMAZrhP7fmstNJ0yKmlg==", + "license": "MIT", "dependencies": { "markdown-it": "^14.1.0", - "shiki": "1.24.2" + "shiki": "1.23.1" } }, "node_modules/@shikijs/types": { - "version": "1.24.2", - "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-1.24.2.tgz", - "integrity": "sha512-bdeWZiDtajGLG9BudI0AHet0b6e7FbR0EsE4jpGaI0YwHm/XJunI9+3uZnzFtX65gsyJ6ngCIWUfA4NWRPnBkQ==", + "version": "1.23.1", + "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-1.23.1.tgz", + "integrity": "sha512-98A5hGyEhzzAgQh2dAeHKrWW4HfCMeoFER2z16p5eJ+vmPeF6lZ/elEne6/UCU551F/WqkopqRsr1l2Yu6+A0g==", + "license": "MIT", "dependencies": { "@shikijs/vscode-textmate": "^9.3.0", "@types/hast": "^3.0.4" } }, "node_modules/@shikijs/vscode-textmate": { - "version": "9.3.1", - "resolved": "https://registry.npmjs.org/@shikijs/vscode-textmate/-/vscode-textmate-9.3.1.tgz", - "integrity": "sha512-79QfK1393x9Ho60QFyLti+QfdJzRQCVLFb97kOIV7Eo9vQU/roINgk7m24uv0a7AUvN//RDH36FLjjK48v0s9g==" + "version": "9.3.0", + "resolved": 
"https://registry.npmjs.org/@shikijs/vscode-textmate/-/vscode-textmate-9.3.0.tgz", + "integrity": "sha512-jn7/7ky30idSkd/O5yDBfAnVt+JJpepofP/POZ1iMOxK59cOfqIgg/Dj0eFsjOTMw+4ycJN0uhZH/Eb0bs/EUA==", + "license": "MIT" }, "node_modules/@svgr/babel-plugin-add-jsx-attribute": { "version": "8.0.0", @@ -2435,9 +1856,9 @@ } }, "node_modules/@tiptap/core": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/@tiptap/core/-/core-2.10.3.tgz", - "integrity": "sha512-wAG/0/UsLeZLmshWb6rtWNXKJftcmnned91/HLccHVQAuQZ1UWH+wXeQKu/mtodxEO7JcU2mVPR9mLGQkK0McQ==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/@tiptap/core/-/core-2.10.0.tgz", + "integrity": "sha512-58nAjPxLRFcXepdDqQRC1mhrw6E8Sanqr6bbO4Tz0+FWgDJMZvHG+dOK5wHaDVNSgK2iJDz08ETvQayfOOgDvg==", "peer": true, "funding": { "type": "github", @@ -2448,9 +1869,9 @@ } }, "node_modules/@tiptap/extension-bubble-menu": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/@tiptap/extension-bubble-menu/-/extension-bubble-menu-2.10.3.tgz", - "integrity": "sha512-e9a4yMjQezuKy0rtyyzxbV2IAE1bm1PY3yoZEFrcaY0o47g1CMUn2Hwe+9As2HdntEjQpWR7NO1mZeKxHlBPYA==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/@tiptap/extension-bubble-menu/-/extension-bubble-menu-2.10.0.tgz", + "integrity": "sha512-6CeForthuKbupOACdDm6FMOiiKNC4kMYU923lSv9pC5UrfhJEpHJ1dVrZ0b1dP2hhEc4c1tzly6k0oozIPjrEQ==", "dependencies": { "tippy.js": "^6.3.7" }, @@ -2464,9 +1885,9 @@ } }, "node_modules/@tiptap/extension-document": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/@tiptap/extension-document/-/extension-document-2.10.3.tgz", - "integrity": "sha512-6i8+xbS2zB6t8iFzli1O/QB01MmwyI5Hqiiv4m5lOxqavmJwLss2sRhoMC2hB3CyFg5UmeODy/f/RnI6q5Vixg==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/@tiptap/extension-document/-/extension-document-2.10.0.tgz", + "integrity": "sha512-vseMW3EKiQAPgdbN48Y8F0nRqWhhrAo9DLacAfP7tu0x3uv44uotNjDBtAgp5QmJmqQVyrEdkLSZaU5vFzduhQ==", "funding": { 
"type": "github", "url": "https://github.com/sponsors/ueberdosis" @@ -2476,9 +1897,9 @@ } }, "node_modules/@tiptap/extension-floating-menu": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/@tiptap/extension-floating-menu/-/extension-floating-menu-2.10.3.tgz", - "integrity": "sha512-Prg8rYLxeyzHxfzVu1mDkkUWMnD9ZN3y370O/1qy55e+XKVw9jFkTSuz0y0+OhMJG6bulYpDUMtb+N3+2xOWlQ==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/@tiptap/extension-floating-menu/-/extension-floating-menu-2.10.0.tgz", + "integrity": "sha512-i0fuD1vhUsl9Z9dKoSM4RGYRioHUAEdSL3Wez3On4UTNAfq7RC0UCmaRRVjJSB7QB3AoIr/TwvbUuVb6jvznRg==", "dependencies": { "tippy.js": "^6.3.7" }, @@ -2492,9 +1913,9 @@ } }, "node_modules/@tiptap/extension-history": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/@tiptap/extension-history/-/extension-history-2.10.3.tgz", - "integrity": "sha512-HaSiMdx9Im9Pb9qGlVud7W8bweRDRMez33Uzs5a2x0n1RWkelfH7TwYs41Y3wus8Ujs7kw6qh7jyhvPpQBKaSA==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/@tiptap/extension-history/-/extension-history-2.10.0.tgz", + "integrity": "sha512-5aYOmxqaCnw7e7wmWqFZmkpYCxxDjEzFbgVI6WknqNwqeOizR4+YJf3aAt/lTbksLJe47XF+NBX51gOm/ZBCiw==", "funding": { "type": "github", "url": "https://github.com/sponsors/ueberdosis" @@ -2508,6 +1929,7 @@ "version": "2.10.3", "resolved": "https://registry.npmjs.org/@tiptap/extension-image/-/extension-image-2.10.3.tgz", "integrity": "sha512-YIjAF5CwDkMe28OQ5pvnmdRgbJ9JcGMIHY1kyqNunSf2iwphK+6SWz9UEIkDFiT7AsRZySqxFSq93iK1XyTifw==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/ueberdosis" @@ -2517,9 +1939,9 @@ } }, "node_modules/@tiptap/extension-paragraph": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/@tiptap/extension-paragraph/-/extension-paragraph-2.10.3.tgz", - "integrity": "sha512-sNkTX/iN+YoleDiTJsrWSBw9D7c4vsYwnW5y/G5ydfuJMIRQMF78pWSIWZFDRNOMkgK5UHkhu9anrbCFYgBfaA==", + "version": "2.10.0", + 
"resolved": "https://registry.npmjs.org/@tiptap/extension-paragraph/-/extension-paragraph-2.10.0.tgz", + "integrity": "sha512-4LUkVaJYjNdNZ7QOX6TRcA+m7oCtyrLGk49G22wl7XcPBkQPILP1mCUCU4f41bhjfhCgK5PPWP63kMtD+cEACg==", "funding": { "type": "github", "url": "https://github.com/sponsors/ueberdosis" @@ -2529,9 +1951,9 @@ } }, "node_modules/@tiptap/extension-placeholder": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/@tiptap/extension-placeholder/-/extension-placeholder-2.10.3.tgz", - "integrity": "sha512-0OkwnDLguZgoiJM85cfnOySuMmPUF7qqw7DHQ+c3zwTAYnvzpvqrvpupc+2Zi9GfC1sDgr+Ajrp8imBHa6PHfA==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/@tiptap/extension-placeholder/-/extension-placeholder-2.10.0.tgz", + "integrity": "sha512-1o6azk2plgYAFgMrV3prnBb1NZjl2V1T3wwnH4n3/h9z9lJ0v5BBAk9r+TRYSrcdXknwwHAWFYnQe6dc9buG2g==", "funding": { "type": "github", "url": "https://github.com/sponsors/ueberdosis" @@ -2542,9 +1964,9 @@ } }, "node_modules/@tiptap/extension-text": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/@tiptap/extension-text/-/extension-text-2.10.3.tgz", - "integrity": "sha512-7p9XiRprsRZm8y9jvF/sS929FCELJ5N9FQnbzikOiyGNUx5mdI+exVZlfvBr9xOD5s7fBLg6jj9Vs0fXPNRkPg==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/@tiptap/extension-text/-/extension-text-2.10.0.tgz", + "integrity": "sha512-SSnNncADS1KucdEcJlF6WGCs5+1pAhPrD68vlw34oj3NDT3Zh05KiyXsCV3Nw4wpHOnbWahV+z3uT2SnR+xgoQ==", "funding": { "type": "github", "url": "https://github.com/sponsors/ueberdosis" @@ -2554,9 +1976,9 @@ } }, "node_modules/@tiptap/pm": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/@tiptap/pm/-/pm-2.10.3.tgz", - "integrity": "sha512-771p53aU0KFvujvKpngvq2uAxThlEsjYaXcVVmwrhf0vxSSg+psKQEvqvWvHv/3BwkPVCGwmEKNVJZjaXFKu4g==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/@tiptap/pm/-/pm-2.10.0.tgz", + "integrity": 
"sha512-ohshlWf4MlW6D3rQkNQnhmiQ2w4pwRoQcJmTPt8UJoIDGkeKmZh494fQp4Aeh80XuGd81SsCv//1HJeyaeHJYQ==", "dependencies": { "prosemirror-changeset": "^2.2.1", "prosemirror-collab": "^1.3.1", @@ -2575,7 +1997,7 @@ "prosemirror-tables": "^1.6.1", "prosemirror-trailing-node": "^3.0.0", "prosemirror-transform": "^1.10.2", - "prosemirror-view": "^1.37.0" + "prosemirror-view": "^1.36.0" }, "funding": { "type": "github", @@ -2583,12 +2005,12 @@ } }, "node_modules/@tiptap/react": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/@tiptap/react/-/react-2.10.3.tgz", - "integrity": "sha512-5GBL3arWai8WZuCl1MMA7bT5aWwqDi5AOQhX+hovKjwHvttpKDogRoUBL5k6Eds/eQMBMGTpsfmZlGNiFxSv1g==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/@tiptap/react/-/react-2.10.0.tgz", + "integrity": "sha512-mmh6zeR2AiiOY9F/PGhaARrgFHkxnUQBRlbUMOoNPACZHVYM7D9JDO7KcESVW9oEEZGcy18H3B8wsj2/V3nCig==", "dependencies": { - "@tiptap/extension-bubble-menu": "^2.10.3", - "@tiptap/extension-floating-menu": "^2.10.3", + "@tiptap/extension-bubble-menu": "^2.10.0", + "@tiptap/extension-floating-menu": "^2.10.0", "@types/use-sync-external-store": "^0.0.6", "fast-deep-equal": "^3", "use-sync-external-store": "^1" @@ -2605,9 +2027,9 @@ } }, "node_modules/@tiptap/suggestion": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/@tiptap/suggestion/-/suggestion-2.10.3.tgz", - "integrity": "sha512-ReEwiPQoDTXn3RuWnj9D7Aod9dbNQz0QAoLRftWUTdbj3O2ohbvTNX6tlcfS+7x48Q+fAALiJGpp5BtctODlsA==", + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/@tiptap/suggestion/-/suggestion-2.10.0.tgz", + "integrity": "sha512-5vYEfR/EFwWWEio3UYO2PUJolJRzms5CyGBcPklq7NMg7SwKEuZzRiCH1fHE0MK7pnsX83rfw56qcT3sSsNoBA==", "funding": { "type": "github", "url": "https://github.com/sponsors/ueberdosis" @@ -2661,6 +2083,15 @@ "integrity": "sha512-K0Oqlrq3kQMaO2RhfrNQX5trmt+XLyom88zS0u84nnIcLvFnRUMRRHmrGny5GSM+kNO9IZLARsdQHDzkhAgmrQ==", "dev": true }, + "node_modules/@types/dompurify": { + "version": 
"3.2.0", + "resolved": "https://registry.npmjs.org/@types/dompurify/-/dompurify-3.2.0.tgz", + "integrity": "sha512-Fgg31wv9QbLDA0SpTOXO3MaxySc4DKGLi8sna4/Utjo4r3ZRPdCt4UQee8BWr+Q5z21yifghREPJGYaEOEIACg==", + "deprecated": "This is a stub types definition. dompurify provides its own type definitions, so you do not need this installed.", + "dependencies": { + "dompurify": "*" + } + }, "node_modules/@types/estree": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", @@ -2671,6 +2102,7 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", "dependencies": { "@types/unist": "*" } @@ -2679,6 +2111,7 @@ "version": "1.17.15", "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.15.tgz", "integrity": "sha512-25g5atgiVNTIv0LBDTg1H74Hvayx0ajtJPLLcYE3whFv75J0pWNtOBzaXJQgDTmrX1bx5U9YC2w/n65BN1HwRQ==", + "license": "MIT", "dependencies": { "@types/node": "*" } @@ -2722,6 +2155,7 @@ "version": "14.1.2", "resolved": "https://registry.npmjs.org/@types/markdown-it/-/markdown-it-14.1.2.tgz", "integrity": "sha512-promo4eFwuiW+TfGxhi+0x3czqTYJkG8qB17ZUJiVF10Xm7NLVRSLUsfRTU/6h1e24VvRnXCx+hG7li58lkzog==", + "license": "MIT", "dependencies": { "@types/linkify-it": "^5", "@types/mdurl": "^2" @@ -2731,6 +2165,7 @@ "version": "4.0.4", "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "license": "MIT", "dependencies": { "@types/unist": "*" } @@ -2744,18 +2179,19 @@ "version": "1.2.5", "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.5.tgz", "integrity": "sha512-hov8bUuiLiyFPGyFPE1lwWhmzYbirOXQNNo40+y3zow8aFVTeyn3VWL0VFFfdNddA8S4Vf0Tc062rzyNr7Paag==", - "dev": true + "dev": true, + "license": "MIT" }, 
"node_modules/@types/mocha": { - "version": "10.0.10", - "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-10.0.10.tgz", - "integrity": "sha512-xPyYSz1cMPnJQhl0CLMH68j3gprKZaTjG3s5Vi+fDgx+uhG9NOXwbVt52eFS8ECyXhyKcjDLCBEqBExKuiZb7Q==", + "version": "10.0.9", + "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-10.0.9.tgz", + "integrity": "sha512-sicdRoWtYevwxjOHNMPTl3vSfJM6oyW8o1wXeI7uww6b6xHg8eBznQDNSGBCDJmsE8UMxP05JgZRtsKbTqt//Q==", "dev": true }, "node_modules/@types/node": { - "version": "18.19.68", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.68.tgz", - "integrity": "sha512-QGtpFH1vB99ZmTa63K4/FU8twThj4fuVSBkGddTp7uIL/cuoLWIUSL2RcOaigBhfR+hg5pgGkBnkoOxrTVBMKw==", + "version": "18.19.64", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.64.tgz", + "integrity": "sha512-955mDqvO2vFf/oL7V3WiUtiz+BugyX8uVbaT2H8oj3+8dRyH2FLiNdowe7eNqRM7IOIZvzDH76EoAT+gwm6aIQ==", "dependencies": { "undici-types": "~5.26.4" } @@ -2770,15 +2206,15 @@ } }, "node_modules/@types/prop-types": { - "version": "15.7.14", - "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.14.tgz", - "integrity": "sha512-gNMvNH49DJ7OJYv+KAKn0Xp45p8PLl6zo2YnvDIbTd4J6MER2BmWN49TG7n9LvkyihINxeKW8+3bfS2yDC9dzQ==", + "version": "15.7.13", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.13.tgz", + "integrity": "sha512-hCZTSvwbzWGvhqxp/RqVqwU999pBf2vp7hzIjiYOsl8wqOmUxkQ6ddw1cV3l8811+kdUFus/q4d1Y3E3SyEifA==", "devOptional": true }, "node_modules/@types/react": { - "version": "18.3.16", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.16.tgz", - "integrity": "sha512-oh8AMIC4Y2ciKufU8hnKgs+ufgbA/dhPTACaZPM86AbwX9QwnFtSoPWEeRUj8fge+v6kFt78BXcDhAU1SrrAsw==", + "version": "18.3.12", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.12.tgz", + "integrity": "sha512-D2wOSq/d6Agt28q7rSI3jhU7G6aiuzljDGZ2hTZHIkrTLUI+AF3WMeKkEZ9nN2fkBAlcktT6vcZjDFiIhMYEQw==", "devOptional": true, 
"dependencies": { "@types/prop-types": "*", @@ -2786,12 +2222,12 @@ } }, "node_modules/@types/react-dom": { - "version": "18.3.5", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.5.tgz", - "integrity": "sha512-P4t6saawp+b/dFrUr2cvkVsfvPguwsxtH6dNIYRllMsefqFzkZk5UIjzyDOv5g1dXIPdG4Sp1yCR4Z6RCUsG/Q==", + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-qW1Mfv8taImTthu4KoXgDfLuk4bydU6Q/TkADnDWWHwi4NX4BR+LWfTp2sVmTqRrsHvyDDTelgelxJ+SsejKKQ==", "devOptional": true, - "peerDependencies": { - "@types/react": "^18.0.0" + "dependencies": { + "@types/react": "*" } }, "node_modules/@types/request": { @@ -2857,7 +2293,8 @@ "node_modules/@types/unist": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", - "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==" + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" }, "node_modules/@types/use-sync-external-store": { "version": "0.0.6", @@ -2871,22 +2308,22 @@ "dev": true }, "node_modules/@types/vscode": { - "version": "1.96.0", - "resolved": "https://registry.npmjs.org/@types/vscode/-/vscode-1.96.0.tgz", - "integrity": "sha512-qvZbSZo+K4ZYmmDuaodMbAa67Pl6VDQzLKFka6rq+3WUTY4Kro7Bwoi0CuZLO/wema0ygcmpwow7zZfPJTs5jg==", + "version": "1.95.0", + "resolved": "https://registry.npmjs.org/@types/vscode/-/vscode-1.95.0.tgz", + "integrity": "sha512-0LBD8TEiNbet3NvWsmn59zLzOFu/txSlGxnv5yAFHCrhG9WvAnR3IvfHzMOs2aeWqgvNjq9pO99IUw8d3n+unw==", "dev": true }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "8.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.18.0.tgz", - "integrity": "sha512-NR2yS7qUqCL7AIxdJUQf2MKKNDVNaig/dEB0GBLU7D+ZdHgK1NoH/3wsgO3OnPVipn51tG3MAwaODEGil70WEw==", + "version": "8.15.0", + 
"resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.15.0.tgz", + "integrity": "sha512-+zkm9AR1Ds9uLWN3fkoeXgFppaQ+uEVtfOV62dDmsy9QCNqlRHWNEck4yarvRNrvRcHQLGfqBNui3cimoz8XAg==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "8.18.0", - "@typescript-eslint/type-utils": "8.18.0", - "@typescript-eslint/utils": "8.18.0", - "@typescript-eslint/visitor-keys": "8.18.0", + "@typescript-eslint/scope-manager": "8.15.0", + "@typescript-eslint/type-utils": "8.15.0", + "@typescript-eslint/utils": "8.15.0", + "@typescript-eslint/visitor-keys": "8.15.0", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", @@ -2901,20 +2338,24 @@ }, "peerDependencies": { "@typescript-eslint/parser": "^8.0.0 || ^8.0.0-alpha.0", - "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <5.8.0" + "eslint": "^8.57.0 || ^9.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, "node_modules/@typescript-eslint/parser": { - "version": "8.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.18.0.tgz", - "integrity": "sha512-hgUZ3kTEpVzKaK3uNibExUYm6SKKOmTU2BOxBSvOYwtJEPdVQ70kZJpPjstlnhCHcuc2WGfSbpKlb/69ttyN5Q==", + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.15.0.tgz", + "integrity": "sha512-7n59qFpghG4uazrF9qtGKBZXn7Oz4sOMm8dwNWDQY96Xlm2oX67eipqcblDj+oY1lLCbf1oltMZFpUso66Kl1A==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "8.18.0", - "@typescript-eslint/types": "8.18.0", - "@typescript-eslint/typescript-estree": "8.18.0", - "@typescript-eslint/visitor-keys": "8.18.0", + "@typescript-eslint/scope-manager": "8.15.0", + "@typescript-eslint/types": "8.15.0", + "@typescript-eslint/typescript-estree": "8.15.0", + "@typescript-eslint/visitor-keys": "8.15.0", "debug": "^4.3.4" }, "engines": { @@ -2925,18 +2366,22 @@ "url": 
"https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <5.8.0" + "eslint": "^8.57.0 || ^9.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "8.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.18.0.tgz", - "integrity": "sha512-PNGcHop0jkK2WVYGotk/hxj+UFLhXtGPiGtiaWgVBVP1jhMoMCHlTyJA+hEj4rszoSdLTK3fN4oOatrL0Cp+Xw==", + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.15.0.tgz", + "integrity": "sha512-QRGy8ADi4J7ii95xz4UoiymmmMd/zuy9azCaamnZ3FM8T5fZcex8UfJcjkiEZjJSztKfEBe3dZ5T/5RHAmw2mA==", "dev": true, "dependencies": { - "@typescript-eslint/types": "8.18.0", - "@typescript-eslint/visitor-keys": "8.18.0" + "@typescript-eslint/types": "8.15.0", + "@typescript-eslint/visitor-keys": "8.15.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2947,13 +2392,13 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "8.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.18.0.tgz", - "integrity": "sha512-er224jRepVAVLnMF2Q7MZJCq5CsdH2oqjP4dT7K6ij09Kyd+R21r7UVJrF0buMVdZS5QRhDzpvzAxHxabQadow==", + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.15.0.tgz", + "integrity": "sha512-UU6uwXDoI3JGSXmcdnP5d8Fffa2KayOhUUqr/AiBnG1Gl7+7ut/oyagVeSkh7bxQ0zSXV9ptRh/4N15nkCqnpw==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "8.18.0", - "@typescript-eslint/utils": "8.18.0", + "@typescript-eslint/typescript-estree": "8.15.0", + "@typescript-eslint/utils": "8.15.0", "debug": "^4.3.4", "ts-api-utils": "^1.3.0" }, @@ -2965,14 +2410,18 @@ "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "eslint": "^8.57.0 || ^9.0.0", - 
"typescript": ">=4.8.4 <5.8.0" + "eslint": "^8.57.0 || ^9.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, "node_modules/@typescript-eslint/types": { - "version": "8.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.18.0.tgz", - "integrity": "sha512-FNYxgyTCAnFwTrzpBGq+zrnoTO4x0c1CKYY5MuUTzpScqmY5fmsh2o3+57lqdI3NZucBDCzDgdEbIaNfAjAHQA==", + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.15.0.tgz", + "integrity": "sha512-n3Gt8Y/KyJNe0S3yDCD2RVKrHBC4gTUcLTebVBXacPy091E6tNspFLKRXlk3hwT4G55nfr1n2AdFqi/XMxzmPQ==", "dev": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2983,13 +2432,13 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "8.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.18.0.tgz", - "integrity": "sha512-rqQgFRu6yPkauz+ms3nQpohwejS8bvgbPyIDq13cgEDbkXt4LH4OkDMT0/fN1RUtzG8e8AKJyDBoocuQh8qNeg==", + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.15.0.tgz", + "integrity": "sha512-1eMp2JgNec/niZsR7ioFBlsh/Fk0oJbhaqO0jRyQBMgkz7RrFfkqF9lYYmBoGBaSiLnu8TAPQTwoTUiSTUW9dg==", "dev": true, "dependencies": { - "@typescript-eslint/types": "8.18.0", - "@typescript-eslint/visitor-keys": "8.18.0", + "@typescript-eslint/types": "8.15.0", + "@typescript-eslint/visitor-keys": "8.15.0", "debug": "^4.3.4", "fast-glob": "^3.3.2", "is-glob": "^4.0.3", @@ -3004,8 +2453,10 @@ "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" }, - "peerDependencies": { - "typescript": ">=4.8.4 <5.8.0" + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { @@ -3021,15 +2472,15 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "8.18.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.18.0.tgz", - "integrity": "sha512-p6GLdY383i7h5b0Qrfbix3Vc3+J2k6QWw6UMUeY5JGfm3C5LbZ4QIZzJNoNOfgyRe0uuYKjvVOsO/jD4SJO+xg==", + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.15.0.tgz", + "integrity": "sha512-k82RI9yGhr0QM3Dnq+egEpz9qB6Un+WLYhmoNcvl8ltMEededhh7otBVVIDDsEEttauwdY/hQoSsOv13lxrFzQ==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "8.18.0", - "@typescript-eslint/types": "8.18.0", - "@typescript-eslint/typescript-estree": "8.18.0" + "@typescript-eslint/scope-manager": "8.15.0", + "@typescript-eslint/types": "8.15.0", + "@typescript-eslint/typescript-estree": "8.15.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -3039,17 +2490,21 @@ "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <5.8.0" + "eslint": "^8.57.0 || ^9.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "8.18.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.18.0.tgz", - "integrity": "sha512-pCh/qEA8Lb1wVIqNvBke8UaRjJ6wrAWkJO5yyIbs8Yx6TNGYyfNjOo61tLv+WwLvoLPp4BQ8B7AHKijl8NGUfw==", + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.15.0.tgz", + "integrity": "sha512-h8vYOulWec9LhpwfAdZf2bjr8xIp0KNKnpgqSz0qqYYKAW/QZKw3ktRndbiAtUz4acH4QLQavwZBYCc0wulA/Q==", "dev": true, "dependencies": { - "@typescript-eslint/types": "8.18.0", + "@typescript-eslint/types": "8.15.0", "eslint-visitor-keys": "^4.2.0" }, "engines": { @@ -3073,9 +2528,10 @@ } }, "node_modules/@ungap/structured-clone": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.1.tgz", - "integrity": 
"sha512-fEzPV3hSkSMltkw152tJKNARhOupqbH96MZWyRjNaYZOMIzbrTeQDG+MTc6Mr2pgzFQzFxAfmhGDNP5QK++2ZA==" + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "license": "ISC" }, "node_modules/@vscode/codicons": { "version": "0.0.36", @@ -3166,9 +2622,12 @@ } }, "node_modules/agent-base": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", - "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.1.tgz", + "integrity": "sha512-H0TSyFNDMomMNJQBn8wFV5YC/2eJ+VXECwOadZJT554xP6cODZHPX3H9QMQECxvrgiSOP1pHjy1sMWQVYJOUOA==", + "dependencies": { + "debug": "^4.3.4" + }, "engines": { "node": ">= 14" } @@ -3394,9 +2853,9 @@ } }, "node_modules/axios": { - "version": "1.7.9", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.9.tgz", - "integrity": "sha512-LhLcE7Hbiryz8oMDdDptSrWowmB4Bl6RCt6sIJKpRB4XtVf0iEgewX3au/pJqm+Py1kCASkb/FFKjxQaLtxJvw==", + "version": "1.7.7", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.7.tgz", + "integrity": "sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q==", "dependencies": { "follow-redirects": "^1.15.6", "form-data": "^4.0.0", @@ -3570,44 +3029,16 @@ } }, "node_modules/call-bind": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", - "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "dev": true, 
"dependencies": { - "call-bind-apply-helpers": "^1.0.0", "es-define-property": "^1.0.0", - "get-intrinsic": "^1.2.4", - "set-function-length": "^1.2.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/call-bind-apply-helpers": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.1.tgz", - "integrity": "sha512-BhYE+WDaywFg2TBWYNXAE+8B1ATnThNBqXHP5nQu0jWJdVvY2hvkpyB3qOmtmDePiS5/BDQ8wASEWGMWRG148g==", - "dev": true, - "dependencies": { "es-errors": "^1.3.0", - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/call-bound": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.2.tgz", - "integrity": "sha512-0lk0PHFe/uz0vl527fG9CgdE9WdafjDbCXvBbs+LUv000TVt2Jjhqbs4Jwm8gz070w8xXyEAxrPOMullsxXeGg==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.8", - "get-intrinsic": "^1.2.5" + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" }, "engines": { "node": ">= 0.4" @@ -3647,9 +3078,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001688", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001688.tgz", - "integrity": "sha512-Nmqpru91cuABu/DTCXbM2NSRHzM2uVHfPnhJ/1zEAJx/ILBRVmz3pzH4N7DZqbdG0gWClsCC05Oj0mJ/1AWMbA==", + "version": "1.0.30001680", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001680.tgz", + "integrity": "sha512-rPQy70G6AGUMnbwS1z6Xg+RkHYPAi18ihs47GH0jcxIG7wArmPgY3XbS2sRdBbxJljp3thdT8BIqv9ccCypiPA==", "dev": true, "funding": [ { @@ -3670,6 +3101,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "license": "MIT", "funding": { "type": "github", "url": 
"https://github.com/sponsors/wooorm" @@ -3707,6 +3139,7 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -3716,6 +3149,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -3746,14 +3180,22 @@ } }, "node_modules/class-variance-authority": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.1.tgz", - "integrity": "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==", + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.0.tgz", + "integrity": "sha512-jFI8IQw4hczaL4ALINxqLEXQbWcNjoSkloa4IaufXCJr6QawJyw7tuRysRsrE8w2p/4gGaxKIt/hX3qz/IbD1A==", "dependencies": { - "clsx": "^2.1.1" + "clsx": "2.0.0" }, "funding": { - "url": "https://polar.sh/cva" + "url": "https://joebell.co.uk" + } + }, + "node_modules/class-variance-authority/node_modules/clsx": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.0.0.tgz", + "integrity": "sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==", + "engines": { + "node": ">=6" } }, "node_modules/cli-cursor": { @@ -3940,6 +3382,7 @@ "version": "2.0.3", "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", "integrity": 
"sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -4118,9 +3561,9 @@ } }, "node_modules/debug": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", - "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", "dependencies": { "ms": "^2.1.3" }, @@ -4213,6 +3656,7 @@ "version": "2.0.3", "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", "engines": { "node": ">=6" } @@ -4239,6 +3683,7 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", "dependencies": { "dequal": "^2.0.0" }, @@ -4268,9 +3713,9 @@ "dev": true }, "node_modules/dompurify": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.2.3.tgz", - "integrity": "sha512-U1U5Hzc2MO0oW3DF+G9qYN0aT7atAou4AgI0XjWz061nyBPbdxkfdhfy5uMgGn6+oLFCfn44ZGbdDqCzVmlOWA==", + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.2.1.tgz", + "integrity": "sha512-NBHEsc0/kzRYQd+AY6HR6B/IgsqzBABrqJbpCDQII/OK6h7B7LXzweZTDsqSW2LkTRpoxf18YUP+YjGySk6B3w==", "optionalDependencies": { "@types/trusted-types": "^2.0.7" } @@ -4285,20 +3730,6 @@ "tslib": "^2.0.3" } }, - "node_modules/dunder-proto": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.0.tgz", - "integrity": 
"sha512-9+Sj30DIu+4KvHqMfLUGLFYL2PkURSYMVXJyXe92nFRvlYq5hBjLEhblKB+vkd/WVlUYMWigiY07T91Fkk0+4A==", - "dev": true, - "dependencies": { - "call-bind-apply-helpers": "^1.0.0", - "es-errors": "^1.3.0", - "gopd": "^1.2.0" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/duplexify": { "version": "4.1.3", "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-4.1.3.tgz", @@ -4325,9 +3756,9 @@ } }, "node_modules/electron-to-chromium": { - "version": "1.5.73", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.73.tgz", - "integrity": "sha512-8wGNxG9tAG5KhGd3eeA0o6ixhiNdgr0DcHWm85XPCphwZgD1lIEoi6t3VERayWao7SF7AAZTw6oARGJeVjH8Kg==", + "version": "1.5.63", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.63.tgz", + "integrity": "sha512-ddeXKuY9BHo/mw145axlyWjlJ1UBt4WK3AlvkT7W2AbqfRQoacVoRUCF6wL3uIx/8wT9oLKXzI+rFqHHscByaA==", "dev": true }, "node_modules/emoji-regex": { @@ -4339,7 +3770,8 @@ "node_modules/emoji-regex-xs": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/emoji-regex-xs/-/emoji-regex-xs-1.0.0.tgz", - "integrity": "sha512-LRlerrMYoIDrT6jgpeZ2YYl/L8EulRTt5hQcYjy5AInh7HWXKimpqx68aknBFpGL2+/IcogTcaydJEgaTmOpDg==" + "integrity": "sha512-LRlerrMYoIDrT6jgpeZ2YYl/L8EulRTt5hQcYjy5AInh7HWXKimpqx68aknBFpGL2+/IcogTcaydJEgaTmOpDg==", + "license": "MIT" }, "node_modules/enabled": { "version": "2.0.0", @@ -4448,10 +3880,13 @@ } }, "node_modules/es-define-property": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", - "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", "dev": true, + "dependencies": { + 
"get-intrinsic": "^1.2.4" + }, "engines": { "node": ">= 0.4" } @@ -4492,14 +3927,14 @@ } }, "node_modules/es-to-primitive": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.3.0.tgz", - "integrity": "sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", + "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", "dev": true, "dependencies": { - "is-callable": "^1.2.7", - "is-date-object": "^1.0.5", - "is-symbol": "^1.0.4" + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" }, "engines": { "node": ">= 0.4" @@ -4595,9 +4030,9 @@ } }, "node_modules/eslint": { - "version": "9.16.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.16.0.tgz", - "integrity": "sha512-whp8mSQI4C8VXd+fLgSM0lh3UlmcFtVwUQjyKCFfsp+2ItAIYhlq/hqGahGqHE6cv9unM41VlqKk2VtKYR2TaA==", + "version": "9.15.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.15.0.tgz", + "integrity": "sha512-7CrWySmIibCgT1Os28lUU6upBshZ+GxybLOrmRzi08kS8MBuO8QA7pXEgYgY5W8vK3e74xv0lpjo9DbaGU9Rkw==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", @@ -4605,7 +4040,7 @@ "@eslint/config-array": "^0.19.0", "@eslint/core": "^0.9.0", "@eslint/eslintrc": "^3.2.0", - "@eslint/js": "9.16.0", + "@eslint/js": "9.15.0", "@eslint/plugin-kit": "^0.2.3", "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", @@ -4809,7 +4244,8 @@ "node_modules/eventemitter3": { "version": "4.0.7", "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + 
"license": "MIT" }, "node_modules/extend": { "version": "3.0.2", @@ -4972,10 +4408,11 @@ "dev": true }, "node_modules/flow-remove-types": { - "version": "2.256.0", - "resolved": "https://registry.npmjs.org/flow-remove-types/-/flow-remove-types-2.256.0.tgz", - "integrity": "sha512-R2SbUlCUdQb69fvU2dXRfM+gP+qItF5rMzebxmTzJX556i3BNExzc9oONWC1da6camios6fuo+dAJ6cyRbbhpQ==", + "version": "2.257.0", + "resolved": "https://registry.npmjs.org/flow-remove-types/-/flow-remove-types-2.257.0.tgz", + "integrity": "sha512-sa6Y9MuNqOwg/U4e+i68nJOsD7Yv8Cqt3BZDEGTqIahQStMOOW4ongSJMpnGi21VKLu5Nu8WQeB7DQS6CpolkQ==", "dev": true, + "license": "MIT", "dependencies": { "hermes-parser": "0.25.1", "pirates": "^3.0.2", @@ -4989,6 +4426,19 @@ "node": ">=4" } }, + "node_modules/flow-remove-types/node_modules/pirates": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-3.0.2.tgz", + "integrity": "sha512-c5CgUJq6H2k6MJz72Ak1F5sN9n9wlSlJyEnwvpm9/y3WB4E3pHBDT2c6PEiS1vyJvq2bUxUAIu0EGf8Cx4Ic7Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "node-modules-regexp": "^1.0.0" + }, + "engines": { + "node": ">= 4" + } + }, "node_modules/fn.name": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/fn.name/-/fn.name-1.1.0.tgz", @@ -5106,20 +4556,6 @@ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", "dev": true }, - "node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "dev": true, - "hasInstallScript": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, "node_modules/function-bind": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", @@ -5261,21 +4697,16 @@ } }, 
"node_modules/get-intrinsic": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.6.tgz", - "integrity": "sha512-qxsEs+9A+u85HhllWJJFicJfPDhRmjzoYdl64aMWW9yRIJmSyxdn8IEkuIM530/7T+lv0TIHd8L6Q/ra0tEoeA==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "dev": true, "dependencies": { - "call-bind-apply-helpers": "^1.0.1", - "dunder-proto": "^1.0.0", - "es-define-property": "^1.0.1", "es-errors": "^1.3.0", - "es-object-atoms": "^1.0.0", "function-bind": "^1.1.2", - "gopd": "^1.2.0", - "has-symbols": "^1.1.0", - "hasown": "^2.0.2", - "math-intrinsics": "^1.0.0" + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" }, "engines": { "node": ">= 0.4" @@ -5382,12 +4813,12 @@ } }, "node_modules/gopd": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", - "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", "dev": true, - "engines": { - "node": ">= 0.4" + "dependencies": { + "get-intrinsic": "^1.1.3" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -5448,13 +4879,10 @@ } }, "node_modules/has-proto": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.2.0.tgz", - "integrity": "sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", "dev": true, - 
"dependencies": { - "dunder-proto": "^1.0.0" - }, "engines": { "node": ">= 0.4" }, @@ -5463,9 +4891,9 @@ } }, "node_modules/has-symbols": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", - "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", "dev": true, "engines": { "node": ">= 0.4" @@ -5505,6 +4933,7 @@ "version": "9.0.3", "resolved": "https://registry.npmjs.org/hast-util-to-html/-/hast-util-to-html-9.0.3.tgz", "integrity": "sha512-M17uBDzMJ9RPCqLMO92gNNUDuBSq10a25SDBI08iCCxmorf4Yy6sYHK57n9WAbRAAaU+DuR4W6GN9K4DFZesYg==", + "license": "MIT", "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", @@ -5527,6 +4956,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "license": "MIT", "dependencies": { "@types/hast": "^3.0.0" }, @@ -5548,13 +4978,15 @@ "version": "0.25.1", "resolved": "https://registry.npmjs.org/hermes-estree/-/hermes-estree-0.25.1.tgz", "integrity": "sha512-0wUoCcLp+5Ev5pDW2OriHC2MJCbwLwuRx+gAqMTOkGKJJiBCLjtrvy4PWUGn6MIVefecRpzoOZ/UV6iGdOr+Cw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/hermes-parser": { "version": "0.25.1", "resolved": "https://registry.npmjs.org/hermes-parser/-/hermes-parser-0.25.1.tgz", "integrity": "sha512-6pEjquH3rqaI6cYAXYPcz9MS4rY6R4ngRgrgfDshRptUZIc3lw0MCIJIGDj9++mfySOuPTHB4nrSW99BCvOPIA==", "dev": true, + "license": "MIT", "dependencies": { "hermes-estree": "0.25.1" } @@ -5601,6 +5033,7 @@ "version": "3.0.0", "resolved": 
"https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -5610,6 +5043,7 @@ "version": "1.18.1", "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "license": "MIT", "dependencies": { "eventemitter3": "^4.0.0", "follow-redirects": "^1.0.0", @@ -5632,11 +5066,11 @@ } }, "node_modules/https-proxy-agent": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", - "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.5.tgz", + "integrity": "sha512-1e4Wqeblerz+tMKPIq2EMGiiWW1dIjZOksyHWSUm1rmuvw/how9hBHZ38lAGj5ID4Ik6EdkOw7NmWPy6LAwalw==", "dependencies": { - "agent-base": "^7.1.2", + "agent-base": "^7.0.2", "debug": "4" }, "engines": { @@ -5800,31 +5234,13 @@ "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", "dev": true }, - "node_modules/is-async-function": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.0.0.tgz", - "integrity": "sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==", - "dev": true, - "dependencies": { - "has-tostringtag": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/is-bigint": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.1.0.tgz", - "integrity": 
"sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", + "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==", "dev": true, "dependencies": { - "has-bigints": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" + "has-bigints": "^1.0.1" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -5843,13 +5259,13 @@ } }, "node_modules/is-boolean-object": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.0.tgz", - "integrity": "sha512-kR5g0+dXf/+kXnqI+lu0URKYPKgICtHGGNCDSB10AaUFj3o/HkB3u7WfpRBJGFopxxY0oH3ux7ZsDjLtK7xqvw==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", + "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==", "dev": true, "dependencies": { - "call-bind": "^1.0.7", - "has-tostringtag": "^1.0.2" + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" }, "engines": { "node": ">= 0.4" @@ -5886,13 +5302,11 @@ } }, "node_modules/is-data-view": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.2.tgz", - "integrity": "sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw==", + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.1.tgz", + "integrity": "sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w==", "dev": true, "dependencies": { - "call-bound": "^1.0.2", - "get-intrinsic": "^1.2.6", "is-typed-array": "^1.1.13" }, "engines": { @@ -5925,21 +5339,6 @@ "node": ">=0.10.0" } }, - "node_modules/is-finalizationregistry": { - "version": "1.1.0", - "resolved": 
"https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.1.0.tgz", - "integrity": "sha512-qfMdqbAQEwBw78ZyReKnlA8ezmPdb9BemzIIip/JkjaZUhitfXDkkr+3QTboW0JrSXT1QWyYShpvnNHGZ4c4yA==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.7" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", @@ -5949,21 +5348,6 @@ "node": ">=8" } }, - "node_modules/is-generator-function": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", - "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", - "dev": true, - "dependencies": { - "has-tostringtag": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", @@ -5987,18 +5371,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/is-map": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", - "integrity": "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==", - "dev": true, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/is-negative-zero": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz", @@ -6020,13 +5392,12 @@ } }, "node_modules/is-number-object": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.1.0.tgz", - "integrity": 
"sha512-KVSZV0Dunv9DTPkhXwcZ3Q+tUc9TsaE1ZwX5J2WMvsSGS6Md8TFPun5uwh0yRdrNerI6vf/tbJxqSx4c1ZI1Lw==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz", + "integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.7", - "has-tostringtag": "^1.0.2" + "has-tostringtag": "^1.0.0" }, "engines": { "node": ">= 0.4" @@ -6050,28 +5421,14 @@ "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==" }, "node_modules/is-regex": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", - "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", + "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", "dev": true, "dependencies": { - "call-bound": "^1.0.2", - "gopd": "^1.2.0", - "has-tostringtag": "^1.0.2", - "hasown": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-set": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz", - "integrity": "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==", - "dev": true, "engines": { "node": ">= 0.4" }, @@ -6106,13 +5463,12 @@ } }, "node_modules/is-string": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.1.0.tgz", - "integrity": "sha512-PlfzajuF9vSo5wErv3MJAKD/nqf9ngAs1NFQYm16nUYFO2IzxJ2hcm+IOCg+EEopdykNNUhVq5cz35cAUxU8+g==", + "version": "1.0.7", + "resolved": 
"https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", + "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==", "dev": true, "dependencies": { - "call-bind": "^1.0.7", - "has-tostringtag": "^1.0.2" + "has-tostringtag": "^1.0.0" }, "engines": { "node": ">= 0.4" @@ -6122,14 +5478,12 @@ } }, "node_modules/is-symbol": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.1.0.tgz", - "integrity": "sha512-qS8KkNNXUZ/I+nX6QT8ZS1/Yx0A444yhzdTKxCzKkNjQ9sHErBxJnJAgh+f5YhusYECEcjo4XcyH87hn6+ks0A==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", + "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", "dev": true, "dependencies": { - "call-bind": "^1.0.7", - "has-symbols": "^1.0.3", - "safe-regex-test": "^1.0.3" + "has-symbols": "^1.0.2" }, "engines": { "node": ">= 0.4" @@ -6165,18 +5519,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/is-weakmap": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz", - "integrity": "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==", - "dev": true, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/is-weakref": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz", @@ -6189,22 +5531,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-weakset": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.3.tgz", - "integrity": "sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.7", - "get-intrinsic": "^1.2.4" - }, - "engines": { - "node": ">= 0.4" - }, 
- "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/isarray": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", @@ -6218,11 +5544,12 @@ "dev": true }, "node_modules/isomorphic-dompurify": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/isomorphic-dompurify/-/isomorphic-dompurify-2.19.0.tgz", - "integrity": "sha512-ppcgeRlEwOQ+v/JDctcjnOsBwEoJlAWVDH5+LisLHphQFeWCrBiVvK6XF4wF0MJM5tJA6RxJSlpbmthnmonxOQ==", + "version": "2.16.0", + "resolved": "https://registry.npmjs.org/isomorphic-dompurify/-/isomorphic-dompurify-2.16.0.tgz", + "integrity": "sha512-cXhX2owp8rPxafCr0ywqy2CGI/4ceLNgWkWBEvUz64KTbtg3oRL2ZRqq/zW0pzt4YtDjkHLbwcp/lozpKzAQjg==", "dependencies": { - "dompurify": "^3.2.3", + "@types/dompurify": "^3.0.5", + "dompurify": "^3.1.7", "jsdom": "^25.0.1" }, "engines": { @@ -6369,6 +5696,7 @@ "version": "8.18.0", "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", + "license": "MIT", "engines": { "node": ">=10.0.0" }, @@ -6386,9 +5714,9 @@ } }, "node_modules/jsesc": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", - "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.0.2.tgz", + "integrity": "sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g==", "dev": true, "bin": { "jsesc": "bin/jsesc" @@ -6551,15 +5879,12 @@ } }, "node_modules/lilconfig": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", - "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", + "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", "dev": true, "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/antonk52" + "node": ">=10" } }, "node_modules/lines-and-columns": { @@ -6733,6 +6058,7 @@ "version": "14.1.0", "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-14.1.0.tgz", "integrity": "sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==", + "license": "MIT", "dependencies": { "argparse": "^2.0.1", "entities": "^4.4.0", @@ -6745,19 +6071,11 @@ "markdown-it": "bin/markdown-it.mjs" } }, - "node_modules/math-intrinsics": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.0.0.tgz", - "integrity": "sha512-4MqMiKP90ybymYvsut0CH2g4XWbfLtmlCkXmtmdcDCxNB+mQcu1w/1+L/VD7vi/PSv7X2JYV7SCcR+jiPXnQtA==", - "dev": true, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/mdast-util-to-hast": { "version": "13.2.0", "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", + "license": "MIT", "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", @@ -6810,6 +6128,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" @@ -6828,7 +6147,8 @@ "type": "OpenCollective", "url": "https://opencollective.com/unified" } - ] + ], + "license": "MIT" }, "node_modules/micromark-util-sanitize-uri": { "version": "2.0.1", @@ -6844,6 +6164,7 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-encode": "^2.0.0", @@ -6863,7 +6184,8 @@ "type": "OpenCollective", 
"url": "https://opencollective.com/unified" } - ] + ], + "license": "MIT" }, "node_modules/micromark-util-types": { "version": "2.0.1", @@ -6878,7 +6200,8 @@ "type": "OpenCollective", "url": "https://opencollective.com/unified" } - ] + ], + "license": "MIT" }, "node_modules/micromatch": { "version": "4.0.8", @@ -6959,6 +6282,7 @@ "version": "1.2.8", "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -6973,9 +6297,9 @@ } }, "node_modules/minisearch": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/minisearch/-/minisearch-7.1.1.tgz", - "integrity": "sha512-b3YZEYCEH4EdCAtYP7OlDyx7FdPwNzuNwLQ34SfJpM9dlbBZzeXndGavTrC+VCiRWomL21SWfMc6SCKO/U2ZNw==" + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/minisearch/-/minisearch-7.1.0.tgz", + "integrity": "sha512-tv7c/uefWdEhcu6hvrfTihflgeEi2tN6VV7HJnCjK6VxM75QQJh4t9FwJCsA2EsRS8LCnu3W87CuGPWMocOLCA==" }, "node_modules/mkdirp": { "version": "2.1.6", @@ -7186,9 +6510,9 @@ } }, "node_modules/nanoid": { - "version": "3.3.8", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz", - "integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==", + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", "dev": true, "funding": [ { @@ -7272,14 +6596,15 @@ "resolved": "https://registry.npmjs.org/node-modules-regexp/-/node-modules-regexp-1.0.0.tgz", "integrity": "sha512-JMaRS9L4wSRIR+6PTVEikTrq/lMGEZR43a48ETeilY0Q0iMwVnccMFrUM1k+tNzmYuIU0Vh710bCUqHX+/+ctQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/node-releases": { - "version": "2.0.19", - 
"resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz", - "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", + "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==", "dev": true }, "node_modules/normalize-package-data": { @@ -7507,9 +6832,9 @@ } }, "node_modules/nwsapi": { - "version": "2.2.16", - "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.16.tgz", - "integrity": "sha512-F1I/bimDpj3ncaNDhfyMWuFqmQDBwDB0Fogc2qpL3BWvkQteFD/8BzWuIRl83rq0DXfm8SGt/HFhLXZyljTXcQ==" + "version": "2.2.13", + "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.13.tgz", + "integrity": "sha512-cTGB9ptp9dY9A5VbMSe7fQBcl/tt22Vcqdq8+eN93rblOuE0aCFu4aZ2vMwct/2t+lFnosm8RkQW1I0Omb1UtQ==" }, "node_modules/object-assign": { "version": "4.1.1", @@ -7600,19 +6925,20 @@ } }, "node_modules/oniguruma-to-es": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-0.7.0.tgz", - "integrity": "sha512-HRaRh09cE0gRS3+wi2zxekB+I5L8C/gN60S+vb11eADHUaB/q4u8wGGOX3GvwvitG8ixaeycZfeoyruKQzUgNg==", + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-0.4.1.tgz", + "integrity": "sha512-rNcEohFz095QKGRovP/yqPIKc+nP+Sjs4YTHMv33nMePGKrq/r2eu9Yh4646M5XluGJsUnmwoXuiXE69KDs+fQ==", + "license": "MIT", "dependencies": { "emoji-regex-xs": "^1.0.0", - "regex": "^5.0.2", - "regex-recursion": "^4.3.0" + "regex": "^5.0.0", + "regex-recursion": "^4.2.1" } }, "node_modules/openai": { - "version": "4.76.1", - "resolved": "https://registry.npmjs.org/openai/-/openai-4.76.1.tgz", - "integrity": "sha512-ci63/WFEMd6QjjEVeH0pV7hnFS6CCqhgJydSti4Aak/8uo2SpgzKjteUDaY+OkwziVj11mi6j+0mRUIiGKUzWw==", + "version": "4.72.0", + "resolved": 
"https://registry.npmjs.org/openai/-/openai-4.72.0.tgz", + "integrity": "sha512-hFqG9BWCs7L7ifrhJXw7mJXmUBr7d9N6If3J9563o0jfwVA4wFANFDDaOIWFdgDdwgCXg5emf0Q+LoLCGszQYA==", "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", @@ -7964,15 +7290,12 @@ } }, "node_modules/pirates": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-3.0.2.tgz", - "integrity": "sha512-c5CgUJq6H2k6MJz72Ak1F5sN9n9wlSlJyEnwvpm9/y3WB4E3pHBDT2c6PEiS1vyJvq2bUxUAIu0EGf8Cx4Ic7Q==", + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", + "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", "dev": true, - "dependencies": { - "node-modules-regexp": "^1.0.0" - }, "engines": { - "node": ">= 4" + "node": ">= 6" } }, "node_modules/possible-typed-array-names": { @@ -8083,6 +7406,18 @@ } } }, + "node_modules/postcss-load-config/node_modules/lilconfig": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz", + "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, "node_modules/postcss-modules": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/postcss-modules/-/postcss-modules-6.0.1.tgz", @@ -8115,9 +7450,9 @@ } }, "node_modules/postcss-modules-local-by-default": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.2.0.tgz", - "integrity": "sha512-5kcJm/zk+GJDSfw+V/42fJ5fhjL5YbFDl8nVdXkJPLLW+Vf9mTD5Xe0wqIaDnLuL2U6cDNpTr+UQ+v2HWIBhzw==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.1.0.tgz", + "integrity": 
"sha512-rm0bdSv4jC3BDma3s9H19ZddW0aHX6EoqwDYU2IfZhRN+53QrufTRo2IdkAbRqLx4R2IYbZnbjKKxg4VN5oU9Q==", "dev": true, "dependencies": { "icss-utils": "^5.0.0", @@ -8240,9 +7575,9 @@ } }, "node_modules/prettier": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz", - "integrity": "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.3.tgz", + "integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==", "dev": true, "bin": { "prettier": "bin/prettier.cjs" @@ -8342,6 +7677,7 @@ "version": "6.5.0", "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz", "integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -8445,9 +7781,9 @@ } }, "node_modules/prosemirror-model": { - "version": "1.24.1", - "resolved": "https://registry.npmjs.org/prosemirror-model/-/prosemirror-model-1.24.1.tgz", - "integrity": "sha512-YM053N+vTThzlWJ/AtPtF1j0ebO36nvbmDy4U7qA2XQB8JVaQp1FmB9Jhrps8s+z+uxhhVTny4m20ptUvhk0Mg==", + "version": "1.23.0", + "resolved": "https://registry.npmjs.org/prosemirror-model/-/prosemirror-model-1.23.0.tgz", + "integrity": "sha512-Q/fgsgl/dlOAW9ILu4OOhYWQbc7TQd4BwKH/RwmUjyVf8682Be4zj3rOYdLnYEcGzyg8LL9Q5IWYKD8tdToreQ==", "dependencies": { "orderedmap": "^2.0.0" } @@ -8461,9 +7797,9 @@ } }, "node_modules/prosemirror-schema-list": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/prosemirror-schema-list/-/prosemirror-schema-list-1.5.0.tgz", - "integrity": "sha512-gg1tAfH1sqpECdhIHOA/aLg2VH3ROKBWQ4m8Qp9mBKrOxQRW61zc+gMCI8nh22gnBzd1t2u1/NPLmO3nAa3ssg==", + "version": "1.4.1", + "resolved": 
"https://registry.npmjs.org/prosemirror-schema-list/-/prosemirror-schema-list-1.4.1.tgz", + "integrity": "sha512-jbDyaP/6AFfDfu70VzySsD75Om2t3sXTOdl5+31Wlxlg62td1haUpty/ybajSfJ1pkGadlOfwQq9kgW5IMo1Rg==", "dependencies": { "prosemirror-model": "^1.0.0", "prosemirror-state": "^1.0.0", @@ -8515,9 +7851,9 @@ } }, "node_modules/prosemirror-view": { - "version": "1.37.1", - "resolved": "https://registry.npmjs.org/prosemirror-view/-/prosemirror-view-1.37.1.tgz", - "integrity": "sha512-MEAnjOdXU1InxEmhjgmEzQAikaS6lF3hD64MveTPpjOGNTl87iRLA1HupC/DEV6YuK7m4Q9DHFNTjwIVtqz5NA==", + "version": "1.36.0", + "resolved": "https://registry.npmjs.org/prosemirror-view/-/prosemirror-view-1.36.0.tgz", + "integrity": "sha512-U0GQd5yFvV5qUtT41X1zCQfbw14vkbbKwLlQXhdylEmgpYVHkefXYcC4HHwWOfZa3x6Y8wxDLUBv7dxN5XQ3nA==", "dependencies": { "prosemirror-model": "^1.20.0", "prosemirror-state": "^1.0.0", @@ -8762,40 +8098,20 @@ "node": ">=8.10.0" } }, - "node_modules/reflect.getprototypeof": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.8.tgz", - "integrity": "sha512-B5dj6usc5dkk8uFliwjwDHM8To5/QwdKz9JcBZ8Ic4G1f0YmeeJTtE/ZTdgRFPAfxZFiUaPhZ1Jcs4qeagItGQ==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.8", - "define-properties": "^1.2.1", - "dunder-proto": "^1.0.0", - "es-abstract": "^1.23.5", - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.4", - "gopd": "^1.2.0", - "which-builtin-type": "^1.2.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/regex": { "version": "5.0.2", "resolved": "https://registry.npmjs.org/regex/-/regex-5.0.2.tgz", "integrity": "sha512-/pczGbKIQgfTMRV0XjABvc5RzLqQmwqxLHdQao2RTXPk+pmTXB2P0IaUHYdYyk412YLwUIkaeMd5T+RzVgTqnQ==", + "license": "MIT", "dependencies": { "regex-utilities": "^2.3.0" } }, "node_modules/regex-recursion": { - "version": "4.3.0", - "resolved": 
"https://registry.npmjs.org/regex-recursion/-/regex-recursion-4.3.0.tgz", - "integrity": "sha512-5LcLnizwjcQ2ALfOj95MjcatxyqF5RPySx9yT+PaXu3Gox2vyAtLDjHB8NTJLtMGkvyau6nI3CfpwFCjPUIs/A==", + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/regex-recursion/-/regex-recursion-4.2.1.tgz", + "integrity": "sha512-QHNZyZAeKdndD1G3bKAbBEKOSSK4KOHQrAJ01N1LJeb0SoH4DJIeFhp0uUpETgONifS4+P3sOgoA1dhzgrQvhA==", + "license": "MIT", "dependencies": { "regex-utilities": "^2.3.0" } @@ -8803,7 +8119,8 @@ "node_modules/regex-utilities": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/regex-utilities/-/regex-utilities-2.3.0.tgz", - "integrity": "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==" + "integrity": "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==", + "license": "MIT" }, "node_modules/regexp.prototype.flags": { "version": "1.5.3", @@ -8835,7 +8152,8 @@ "node_modules/requires-port": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "license": "MIT" }, "node_modules/resolve": { "version": "1.22.8", @@ -8953,15 +8271,14 @@ "integrity": "sha512-cLgakCUf6PedEu15t8kbsjnwIFFR2D4RfL+W3iWFJ4iac7z4B0ZI8fxy4R3J956kAI68HclCFGL8MPoUVC3qVA==" }, "node_modules/safe-array-concat": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz", - "integrity": "sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.2.tgz", + "integrity": 
"sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q==", "dev": true, "dependencies": { - "call-bind": "^1.0.8", - "call-bound": "^1.0.2", - "get-intrinsic": "^1.2.6", - "has-symbols": "^1.1.0", + "call-bind": "^1.0.7", + "get-intrinsic": "^1.2.4", + "has-symbols": "^1.0.3", "isarray": "^2.0.5" }, "engines": { @@ -9021,9 +8338,9 @@ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "node_modules/sass": { - "version": "1.82.0", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.82.0.tgz", - "integrity": "sha512-j4GMCTa8elGyN9A7x7bEglx0VgSpNUG4W4wNedQ33wSMdnkqQCT8HTwOaVSV4e6yQovcu/3Oc4coJP/l0xhL2Q==", + "version": "1.81.0", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.81.0.tgz", + "integrity": "sha512-Q4fOxRfhmv3sqCLoGfvrC9pRV8btc0UtqL9mN6Yrv6Qi9ScL55CVH1vlPP863ISLEEMNLLuu9P+enCeGHlnzhA==", "dev": true, "dependencies": { "chokidar": "^4.0.0", @@ -9170,94 +8487,38 @@ } }, "node_modules/shell-quote": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.2.tgz", - "integrity": "sha512-AzqKpGKjrj7EM6rKVQEPpB288oCfnrEIuyoT9cyF4nmGa7V8Zk6f7RRqYisX8X9m+Q7bd632aZW4ky7EhbQztA==", + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz", + "integrity": "sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==", "dev": true, - "engines": { - "node": ">= 0.4" - }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/shiki": { - "version": "1.24.2", - "resolved": "https://registry.npmjs.org/shiki/-/shiki-1.24.2.tgz", - "integrity": "sha512-TR1fi6mkRrzW+SKT5G6uKuc32Dj2EEa7Kj0k8kGqiBINb+C1TiflVOiT9ta6GqOJtC4fraxO5SLUaKBcSY38Fg==", - "dependencies": { - "@shikijs/core": "1.24.2", - "@shikijs/engine-javascript": "1.24.2", - "@shikijs/engine-oniguruma": "1.24.2", - "@shikijs/types": "1.24.2", + "version": "1.23.1", + 
"resolved": "https://registry.npmjs.org/shiki/-/shiki-1.23.1.tgz", + "integrity": "sha512-8kxV9TH4pXgdKGxNOkrSMydn1Xf6It8lsle0fiqxf7a1149K1WGtdOu3Zb91T5r1JpvRPxqxU3C2XdZZXQnrig==", + "license": "MIT", + "dependencies": { + "@shikijs/core": "1.23.1", + "@shikijs/engine-javascript": "1.23.1", + "@shikijs/engine-oniguruma": "1.23.1", + "@shikijs/types": "1.23.1", "@shikijs/vscode-textmate": "^9.3.0", "@types/hast": "^3.0.4" } }, "node_modules/side-channel": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", - "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", - "dev": true, - "dependencies": { - "es-errors": "^1.3.0", - "object-inspect": "^1.13.3", - "side-channel-list": "^1.0.0", - "side-channel-map": "^1.0.1", - "side-channel-weakmap": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/side-channel-list": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", - "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", - "dev": true, - "dependencies": { - "es-errors": "^1.3.0", - "object-inspect": "^1.13.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/side-channel-map": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", - "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", - "dev": true, - "dependencies": { - "call-bound": "^1.0.2", - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.5", - "object-inspect": "^1.13.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - 
"node_modules/side-channel-weakmap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", - "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "dev": true, "dependencies": { - "call-bound": "^1.0.2", + "call-bind": "^1.0.7", "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.5", - "object-inspect": "^1.13.3", - "side-channel-map": "^1.0.1" + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" }, "engines": { "node": ">= 0.4" @@ -9314,6 +8575,7 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -9479,18 +8741,15 @@ } }, "node_modules/string.prototype.trim": { - "version": "1.2.10", - "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz", - "integrity": "sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA==", + "version": "1.2.9", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz", + "integrity": "sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==", "dev": true, "dependencies": { - "call-bind": "^1.0.8", - "call-bound": "^1.0.2", - "define-data-property": "^1.1.4", + "call-bind": "^1.0.7", "define-properties": "^1.2.1", - "es-abstract": "^1.23.5", - "es-object-atoms": "^1.0.0", - "has-property-descriptors": "^1.0.2" + "es-abstract": "^1.23.0", + "es-object-atoms": 
"^1.0.0" }, "engines": { "node": ">= 0.4" @@ -9500,19 +8759,15 @@ } }, "node_modules/string.prototype.trimend": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.9.tgz", - "integrity": "sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ==", + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.8.tgz", + "integrity": "sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.8", - "call-bound": "^1.0.2", + "call-bind": "^1.0.7", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0" }, - "engines": { - "node": ">= 0.4" - }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -9538,6 +8793,7 @@ "version": "4.0.4", "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "license": "MIT", "dependencies": { "character-entities-html4": "^2.0.0", "character-entities-legacy": "^3.0.0" @@ -9637,15 +8893,6 @@ "node": ">=16 || 14 >=14.17" } }, - "node_modules/sucrase/node_modules/pirates": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", - "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, "node_modules/supports-color": { "version": "9.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-9.4.0.tgz", @@ -9682,18 +8929,18 @@ "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==" }, "node_modules/tailwind-merge": { - "version": "2.5.5", - "resolved": 
"https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-2.5.5.tgz", - "integrity": "sha512-0LXunzzAZzo0tEPxV3I297ffKZPlKDrjj7NXphC8V5ak9yHC5zRmxnOe2m/Rd/7ivsOMJe3JZ2JVocoDdQTRBA==", + "version": "2.5.4", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-2.5.4.tgz", + "integrity": "sha512-0q8cfZHMu9nuYP/b5Shb7Y7Sh1B7Nnl5GqNr1U+n2p6+mybvRtayrQ+0042Z5byvTA8ihjlP8Odo8/VnHbZu4Q==", "funding": { "type": "github", "url": "https://github.com/sponsors/dcastil" } }, "node_modules/tailwindcss": { - "version": "3.4.16", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.16.tgz", - "integrity": "sha512-TI4Cyx7gDiZ6r44ewaJmt0o6BrMCT5aK5e0rmJ/G9Xq3w7CX/5VXl/zIPEJZFUK5VEqwByyhqNPycPlvcK4ZNw==", + "version": "3.4.15", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.15.tgz", + "integrity": "sha512-r4MeXnfBmSOuKUWmXe6h2CcyfzJCEk4F0pptO5jlnYSIViUkVmsawj80N5h2lO3gwcmSb4n3PuN+e+GC1Guylw==", "dev": true, "dependencies": { "@alloc/quick-lru": "^5.2.0", @@ -9705,7 +8952,7 @@ "glob-parent": "^6.0.2", "is-glob": "^4.0.3", "jiti": "^1.21.6", - "lilconfig": "^3.1.3", + "lilconfig": "^2.1.0", "micromatch": "^4.0.8", "normalize-path": "^3.0.0", "object-hash": "^3.0.0", @@ -9954,20 +9201,20 @@ } }, "node_modules/tldts": { - "version": "6.1.67", - "resolved": "https://registry.npmjs.org/tldts/-/tldts-6.1.67.tgz", - "integrity": "sha512-714VbegxoZ9WF5/IsVCy9rWXKUpPkJq87ebWLXQzNawce96l5oRrRf2eHzB4pT2g/4HQU1dYbu+sdXClYxlDKQ==", + "version": "6.1.61", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-6.1.61.tgz", + "integrity": "sha512-rv8LUyez4Ygkopqn+M6OLItAOT9FF3REpPQDkdMx5ix8w4qkuE7Vo2o/vw1nxKQYmJDV8JpAMJQr1b+lTKf0FA==", "dependencies": { - "tldts-core": "^6.1.67" + "tldts-core": "^6.1.61" }, "bin": { "tldts": "bin/cli.js" } }, "node_modules/tldts-core": { - "version": "6.1.67", - "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-6.1.67.tgz", - "integrity": 
"sha512-12K5O4m3uUW6YM5v45Z7wc6NTSmAYj4Tq3de7eXghZkp879IlfPJrUWeWFwu1FS94U5t2vwETgJ1asu8UGNKVQ==" + "version": "6.1.61", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-6.1.61.tgz", + "integrity": "sha512-In7VffkDWUPgwa+c9picLUxvb0RltVwTkSgMNFgvlGSWveCzGBemBqTsgJCL4EDFWZ6WH0fKTsot6yNhzy3ZzQ==" }, "node_modules/to-regex-range": { "version": "5.0.1", @@ -10006,6 +9253,7 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -10020,9 +9268,9 @@ } }, "node_modules/ts-api-utils": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.4.3.tgz", - "integrity": "sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.4.0.tgz", + "integrity": "sha512-032cPxaEKwM+GT3vA5JXNzIaizx388rhsSW79vGRNGXfRRAdEAn2mvk36PvK5HnOchyWZ7afLEXqYCvPCrzuzQ==", "dev": true, "engines": { "node": ">=16" @@ -10097,9 +9345,9 @@ } }, "node_modules/typed-array-byte-offset": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.3.tgz", - "integrity": "sha512-GsvTyUHTriq6o/bHcTd0vM7OQ9JEdlvluu9YISaA7+KzDzPaIzEeDFNkTfhdE3MYcNhNi0vq/LlegYgIs5yPAw==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz", + "integrity": "sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==", "dev": true, "dependencies": { "available-typed-arrays": "^1.0.7", @@ -10107,8 +9355,7 @@ "for-each": "^0.3.3", "gopd": "^1.0.1", "has-proto": "^1.0.3", - "is-typed-array": "^1.1.13", - "reflect.getprototypeof": 
"^1.0.6" + "is-typed-array": "^1.1.13" }, "engines": { "node": ">= 0.4" @@ -10118,17 +9365,17 @@ } }, "node_modules/typed-array-length": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.7.tgz", - "integrity": "sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.6.tgz", + "integrity": "sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g==", "dev": true, "dependencies": { "call-bind": "^1.0.7", "for-each": "^0.3.3", "gopd": "^1.0.1", + "has-proto": "^1.0.3", "is-typed-array": "^1.1.13", - "possible-typed-array-names": "^1.0.0", - "reflect.getprototypeof": "^1.0.6" + "possible-typed-array-names": "^1.0.0" }, "engines": { "node": ">= 0.4" @@ -10138,9 +9385,9 @@ } }, "node_modules/typescript": { - "version": "5.7.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.2.tgz", - "integrity": "sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg==", + "version": "5.6.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.3.tgz", + "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -10179,6 +9426,7 @@ "version": "6.0.0", "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "license": "MIT", "dependencies": { "@types/unist": "^3.0.0" }, @@ -10191,6 +9439,7 @@ "version": "5.0.0", "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "license": "MIT", 
"dependencies": { "@types/unist": "^3.0.0" }, @@ -10203,6 +9452,7 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", "dependencies": { "@types/unist": "^3.0.0" }, @@ -10215,6 +9465,7 @@ "version": "5.0.0", "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "license": "MIT", "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0", @@ -10229,6 +9480,7 @@ "version": "6.0.1", "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "license": "MIT", "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0" @@ -10319,11 +9571,11 @@ } }, "node_modules/use-sync-external-store": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.4.0.tgz", - "integrity": "sha512-9WXSPC5fMv61vaupRkCKCxsPxBocVnwakBEkMIHHpkTTg6icbJtg6jzgtLDm4bl3cSHAca52rYWih0k4K3PfHw==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.2.tgz", + "integrity": "sha512-PElTlVMwpblvbNqQ82d2n6RjStvdSoNe9FG28kNfz3WiXilJm4DdNkEzRhCZuIDwY8U08WVihhGR5iRqAwfDiw==", "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" } }, "node_modules/util-deprecate": { @@ -10371,6 +9623,7 @@ "version": "6.0.3", "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + 
"license": "MIT", "dependencies": { "@types/unist": "^3.0.0", "vfile-message": "^4.0.0" @@ -10384,6 +9637,7 @@ "version": "4.0.2", "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.2.tgz", "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", + "license": "MIT", "dependencies": { "@types/unist": "^3.0.0", "unist-util-stringify-position": "^4.0.0" @@ -10397,7 +9651,8 @@ "version": "0.2.3", "resolved": "https://registry.npmjs.org/vlq/-/vlq-0.2.3.tgz", "integrity": "sha512-DRibZL6DsNhIgYQ+wNdWDL2SL3bKPlVrRiBqV5yuMm++op8W4kGFtaQfCs4KEJn0wBZcHVHJ3eoywX8983k1ow==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/vscode-uri": { "version": "3.0.8", @@ -10461,9 +9716,9 @@ } }, "node_modules/whatwg-url": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.1.0.tgz", - "integrity": "sha512-jlf/foYIKywAt3x/XWKZ/3rz8OSJPiWktjmk891alJUEjiVxKX9LEO92qH3hv4aJ0mN3MWPvGMCy8jQi95xK4w==", + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.0.0.tgz", + "integrity": "sha512-1lfMEm2IEr7RIV+f4lUNPOqfFL+pO+Xw3fJSqmjX9AbXcXcYOkCe1P6+9VBZB6n94af16NfZf+sSk0JCBZC9aw==", "dependencies": { "tr46": "^5.0.0", "webidl-conversions": "^7.0.0" @@ -10488,73 +9743,25 @@ } }, "node_modules/which-boxed-primitive": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.1.0.tgz", - "integrity": "sha512-Ei7Miu/AXe2JJ4iNF5j/UphAgRoma4trE6PtisM09bPygb3egMH3YLW/befsWb1A1AxvNSFidOFTB18XtnIIng==", - "dev": true, - "dependencies": { - "is-bigint": "^1.1.0", - "is-boolean-object": "^1.2.0", - "is-number-object": "^1.1.0", - "is-string": "^1.1.0", - "is-symbol": "^1.1.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/which-builtin-type": { - "version": "1.2.0", - "resolved": 
"https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.2.0.tgz", - "integrity": "sha512-I+qLGQ/vucCby4tf5HsLmGueEla4ZhwTBSqaooS+Y0BuxN4Cp+okmGuV+8mXZ84KDI9BA+oklo+RzKg0ONdSUA==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.7", - "function.prototype.name": "^1.1.6", - "has-tostringtag": "^1.0.2", - "is-async-function": "^2.0.0", - "is-date-object": "^1.0.5", - "is-finalizationregistry": "^1.1.0", - "is-generator-function": "^1.0.10", - "is-regex": "^1.1.4", - "is-weakref": "^1.0.2", - "isarray": "^2.0.5", - "which-boxed-primitive": "^1.0.2", - "which-collection": "^1.0.2", - "which-typed-array": "^1.1.15" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/which-collection": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz", - "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==", + "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", + "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", "dev": true, "dependencies": { - "is-map": "^2.0.3", - "is-set": "^2.0.3", - "is-weakmap": "^2.0.2", - "is-weakset": "^2.0.3" - }, - "engines": { - "node": ">= 0.4" + "is-bigint": "^1.0.1", + "is-boolean-object": "^1.1.0", + "is-number-object": "^1.0.4", + "is-string": "^1.0.5", + "is-symbol": "^1.0.3" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/which-typed-array": { - "version": "1.1.16", - "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.16.tgz", - "integrity": "sha512-g+N+GAWiRj66DngFwHvISJd+ITsyphZvD1vChfVg6cEdnzy53GzB3oy0fUNlvhz7H7+MiqhYr26qxQShCpKTTQ==", + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.15.tgz", + "integrity": 
"sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==", "dev": true, "dependencies": { "available-typed-arrays": "^1.0.7", @@ -10727,6 +9934,7 @@ "version": "7.5.10", "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", + "license": "MIT", "engines": { "node": ">=8.3.0" }, @@ -10878,17 +10086,17 @@ } }, "node_modules/zod": { - "version": "3.24.1", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.24.1.tgz", - "integrity": "sha512-muH7gBL9sI1nciMZV67X5fTKKBLtwpZ5VBp1vsOQzj1MhrBZ4wlVCm3gedKZWLp0Oyel8sIGfeiz54Su+OVT+A==", + "version": "3.23.8", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.23.8.tgz", + "integrity": "sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==", "funding": { "url": "https://github.com/sponsors/colinhacks" } }, "node_modules/zustand": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.2.tgz", - "integrity": "sha512-8qNdnJVJlHlrKXi50LDqqUNmUbuBjoKLrYQBnoChIbVph7vni+sY+YpvdjXG9YLd/Bxr6scMcR+rm5H3aSqPaw==", + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.1.tgz", + "integrity": "sha512-pRET7Lao2z+n5R/HduXMio35TncTlSW68WsYBq2Lg1ASspsNGjpwLAsij3RpouyV6+kHMwwwzP0bZPD70/Jx/w==", "engines": { "node": ">=12.20.0" }, @@ -10917,6 +10125,7 @@ "version": "2.0.4", "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" diff --git a/package.json b/package.json index 4c9b13e..04044c5 100644 --- a/package.json +++ b/package.json @@ -6,7 +6,8 @@ "version": "0.0.11", "private": true, "engines": { - "vscode": "^1.84.0" + "vscode": "^1.84.0", + "python": ">=3.9.0" 
}, "repository": { "type": "git", @@ -111,7 +112,9 @@ "check-types": "tsc --noEmit", "lint": "eslint src", "test": "vscode-test", - "format": "prettier --write \"**/*.{js,jsx,ts,tsx,json,css,scss,md}\"" + "format": "prettier --write \"**/*.{js,jsx,ts,tsx,json,css,scss,md}\"", + "prepare": "chmod +x scripts/codespace-setup.sh", + "postinstall": "if [ \"$CODESPACES\" = \"true\" ]; then ./scripts/codespace-setup.sh; fi" }, "devDependencies": { "@babel/core": "^7.26.0", diff --git a/scripts/codespace-setup.sh b/scripts/codespace-setup.sh new file mode 100755 index 0000000..2c55d55 --- /dev/null +++ b/scripts/codespace-setup.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# Add Ubuntu 22.04 (Jammy) repository for libssl3 +echo "deb http://archive.ubuntu.com/ubuntu jammy main" | sudo tee /etc/apt/sources.list.d/jammy.list + +# Update package lists +sudo apt-get update + +# Install libssl3 and other required dependencies +sudo apt-get install -y libssl3 + +# Install Python 3.9 or newer if not present +if ! command -v python3 >/dev/null 2>&1 || [ $(python3 -c 'import sys; print(sys.version_info >= (3, 9))') = 'False' ]; then + echo "Installing Python 3.9 or newer..." + sudo apt-get install -y python3.9 python3-pip +fi + +# Upgrade pip to latest version +python3 -m pip install --upgrade pip + +# Install pydantic dependencies +pip install -U pydantic +pip install pydantic-ai +pip install pydantic-ai[logfire] + +# Clean up - remove the temporary source list +sudo rm /etc/apt/sources.list.d/jammy.list +sudo apt-get update + +echo "Codespace dependencies installed successfully!" 
\ No newline at end of file diff --git a/src/core/utilities/historyManager.ts b/src/core/utilities/historyManager.ts new file mode 100644 index 0000000..6b4f152 --- /dev/null +++ b/src/core/utilities/historyManager.ts @@ -0,0 +1,47 @@ +import * as vscode from 'vscode'; +import { Exchange } from '../../model'; + +export class HistoryManager { + private static readonly HISTORY_KEY = 'codestory.history'; + + static async saveHistory(exchanges: Exchange[]): Promise { + try { + await vscode.commands.executeCommand('codestory.saveHistory', exchanges); + } catch (error) { + console.error('Failed to save history:', error); + } + } + + static async loadHistory(): Promise { + try { + const history = await vscode.commands.executeCommand('codestory.loadHistory'); + return history || []; + } catch (error) { + console.error('Failed to load history:', error); + return []; + } + } + + static async clearHistory(): Promise { + try { + await vscode.commands.executeCommand('codestory.clearHistory'); + } catch (error) { + console.error('Failed to clear history:', error); + } + } + + static async clearHistoryWithPermission(): Promise { + try { + const result = await vscode.window.showWarningMessage( + 'Are you sure you want to clear the history?', + { modal: true }, + 'Yes', + 'No' + ); + return result === 'Yes'; + } catch (error) { + console.error('Failed to show confirmation dialog:', error); + return false; + } + } +} \ No newline at end of file diff --git a/src/core/utilities/modelSelection.ts b/src/core/utilities/modelSelection.ts index 3ba96a5..0e8736f 100644 --- a/src/core/utilities/modelSelection.ts +++ b/src/core/utilities/modelSelection.ts @@ -6,22 +6,78 @@ export namespace MockModelSelection { export const fastModel: string = 'ClaudeSonnet'; export const models: LanguageModels = { - // Gpt4: { - // name: 'GPT-4', - // contextLength: 8192, - // temperature: 0.2, - // provider: { - // type: 'codestory', - // }, - // }, - // DeepSeekCoder33BInstruct: { - // name: 'DeepSeek Coder 
33B Instruct', - // contextLength: 16384, - // temperature: 0.2, - // provider: { - // type: 'codestory', - // }, - // }, + // 'gpt-4': { + // name: 'GPT-4', + // contextLength: 8192, + // temperature: 0.2, + // provider: { + // type: 'codestory', + // }, + // }, + // 'DeepSeekCoder33BInstruct': { + // name: 'DeepSeek Coder 33B Instruct', + // contextLength: 16384, + // temperature: 0.2, + // provider: { + // type: 'codestory', + // }, + //}, + 'gpt-4': { + name: 'GPT-4', + contextLength: 8192, + temperature: 0.2, + provider: { + type: 'openai', + }, + }, + 'gpt-4-turbo-preview': { + name: 'GPT-4 Turbo', + contextLength: 128000, + temperature: 0.2, + provider: { + type: 'openai', + }, + }, + 'gpt-3.5-turbo': { + name: 'GPT-3.5 Turbo', + contextLength: 16385, + temperature: 0.2, + provider: { + type: 'openai', + }, + }, + 'gpt-4o': { + name: 'GPT-4o', + contextLength: 128000, + temperature: 0.2, + provider: { + type: 'openai', + }, + }, + 'gpt-4o-mini': { + name: 'GPT-4o Mini', + contextLength: 16385, + temperature: 0.2, + provider: { + type: 'openai', + }, + }, + 'o1-preview': { + name: 'O1 Preview', + contextLength: 128000, + temperature: 0.2, + provider: { + type: 'openai', + }, + }, + 'o1-mini': { + name: 'O1 Mini', + contextLength: 16385, + temperature: 0.2, + provider: { + type: 'openai', + }, + }, ClaudeSonnet: { name: 'Claude Sonnet', contextLength: 200000, @@ -38,18 +94,61 @@ export namespace MockModelSelection { type: 'anthropic', }, }, + 'anthropic.claude-3-sonnet-20240229-v1:0': { + name: 'Claude 3 Sonnet (Bedrock)', + contextLength: 200000, + temperature: 0.2, + provider: { + type: 'aws-bedrock', + }, + }, + 'anthropic.claude-3-haiku-20240307-v1:0': { + name: 'Claude 3 Haiku (Bedrock)', + contextLength: 200000, + temperature: 0.2, + provider: { + type: 'aws-bedrock', + }, + }, + 'anthropic.claude-v2:1': { + name: 'Claude 2 (Bedrock)', + contextLength: 100000, + temperature: 0.2, + provider: { + type: 'aws-bedrock', + }, + }, + 
'amazon.titan-text-express-v1': { + name: 'Titan Text Express', + contextLength: 8000, + temperature: 0.2, + provider: { + type: 'aws-bedrock', + }, + }, + 'meta.llama2-70b-chat-v1': { + name: 'Llama 2 70B', + contextLength: 4096, + temperature: 0.2, + provider: { + type: 'aws-bedrock', + }, + }, }; export const providers: ModelProviders = { - //"codestory": { - // name: "CodeStory" + //codestory: { + // name: "CodeStory" //}, - //"ollama": { - // name: "Ollama" - //} + openai: { + name: 'OpenAI', + }, anthropic: { name: 'Anthropic', }, + 'aws-bedrock': { + name: 'AWS Bedrock', + }, 'open-router': { name: 'Open Router', }, diff --git a/src/extension.ts b/src/extension.ts index 5941714..7386473 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -20,12 +20,15 @@ import { SimpleBrowserView } from './browser/simpleBrowserView'; import { SimpleBrowserManager } from './browser/simpleBrowserManager'; import { findPortPosition } from './utils/port'; import { ReactDevtoolsManager } from './devtools/react/DevtoolsManager'; +import { Exchange } from './model'; const openApiCommand = 'sota-swe.api.open'; const showCommand = 'sota-swe.show-browser'; export let SIDECAR_CLIENT: SideCarClient | null = null; +const HISTORY_STORAGE_KEY = 'codestory.history'; + /** Extension → PanelProvider → Webview (app.tsx) (native) (bridge) (UI layer) @@ -70,6 +73,25 @@ export async function activate(context: vscode.ExtensionContext) { RepoRefBackend.local ); + // Register history-related commands + context.subscriptions.push( + vscode.commands.registerCommand('codestory.saveHistory', async (exchanges: Exchange[]) => { + await context.globalState.update(HISTORY_STORAGE_KEY, exchanges); + }) + ); + + context.subscriptions.push( + vscode.commands.registerCommand('codestory.loadHistory', async () => { + return context.globalState.get(HISTORY_STORAGE_KEY) || []; + }) + ); + + context.subscriptions.push( + vscode.commands.registerCommand('codestory.clearHistory', async () => { + await 
context.globalState.update(HISTORY_STORAGE_KEY, []); + }) + ); + // We also get some context about the workspace we are in and what we are // upto const projectContext = new ProjectContext(); diff --git a/src/model.ts b/src/model.ts index cb0fcb0..ef93f9f 100644 --- a/src/model.ts +++ b/src/model.ts @@ -6,7 +6,7 @@ export enum View { Preset = 'preset', Welcome = 'welcome', Settings = 'settings', - // History = 'history', + History = 'history', } export type ViewType = `${View}`; @@ -365,10 +365,10 @@ export type Exchange = Request | Response; export enum Provider { Anthropic = 'anthropic', - //OpenAI = 'open-ai', + OpenAI = 'openai', OpenRouter = 'open-router', //GoogleGemini = 'google-gemini', - //AWSBedrock = 'aws-bedrock', + AWSBedrock = 'aws-bedrock', OpenAICompatible = 'openai-compatible', //Ollama = 'ollama', } @@ -379,7 +379,25 @@ export enum AnthropicModels { ClaudeOpus = 'ClaudeOpus', } -export type Models = `${AnthropicModels}`; +export enum OpenAIModels { + GPT4 = 'gpt-4', + GPT4Turbo = 'gpt-4-turbo-preview', + GPT35Turbo = 'gpt-3.5-turbo', + GPT4O = 'gpt-4o', + GPT4OMini = 'gpt-4o-mini', + O1Preview = 'o1-preview', + O1Mini = 'o1-mini' +} + +export enum AWSBedrockModels { + Claude3Sonnet = 'anthropic.claude-3-sonnet-20240229-v1:0', + Claude3Haiku = 'anthropic.claude-3-haiku-20240307-v1:0', + Claude2 = 'anthropic.claude-v2:1', + Titan = 'amazon.titan-text-express-v1', + Llama2 = 'meta.llama2-70b-chat-v1', +} + +export type Models = `${AnthropicModels}` | `${OpenAIModels}` | `${AWSBedrockModels}`; export enum PermissionState { Always = 'always', diff --git a/src/webviews/@history/history-view.tsx b/src/webviews/@history/history-view.tsx new file mode 100644 index 0000000..568a171 --- /dev/null +++ b/src/webviews/@history/history-view.tsx @@ -0,0 +1,57 @@ +import * as React from 'react'; +import { History } from '../components/history'; +import { HistoryManager } from '../../utilities/historyManager'; +import { useNavigate } from 'react-router-dom'; 
+import { Exchange } from '../../model'; +import * as vscode from 'vscode'; + +export const HistoryView: React.FC = () => { + const [exchanges, setExchanges] = React.useState([]); + const [loading, setLoading] = React.useState(true); + const navigate = useNavigate(); + + React.useEffect(() => { + const loadHistory = async () => { + try { + setLoading(true); + const loadedHistory = await HistoryManager.loadHistory(); + setExchanges(loadedHistory); + } catch (error) { + void vscode.window.showErrorMessage('Failed to load history'); + } finally { + setLoading(false); + } + }; + void loadHistory(); + }, []); + + const handleClearHistory = async () => { + try { + const confirmed = await HistoryManager.clearHistoryWithPermission(); + if (confirmed) { + await HistoryManager.clearHistory(); + setExchanges([]); + navigate('/task'); // Navigate back to task view after clearing + } + } catch (error) { + void vscode.window.showErrorMessage('Failed to clear history'); + } + }; + + if (loading) { + return ( +
+
+
+ ); + } + + return ( +
+ +
+ ); +}; \ No newline at end of file diff --git a/src/webviews/components/history.tsx b/src/webviews/components/history.tsx new file mode 100644 index 0000000..b78b3a6 --- /dev/null +++ b/src/webviews/components/history.tsx @@ -0,0 +1,53 @@ +import * as React from 'react'; +import { Exchange, Response, Request } from '../../model'; +import MarkdownRenderer from './markdown-renderer'; + +interface HistoryProps { + exchanges: Exchange[]; + onClearHistory: () => void; +} + +export const History: React.FC = ({ exchanges, onClearHistory }: HistoryProps) => { + return ( +
+
+

Session History

+ +
+
+ {exchanges.map((exchange: Exchange, index: number) => ( +
+ {exchange.type === 'request' ? ( +
+
User Request:
+
{(exchange as Request).message}
+
+ ) : ( +
+
Assistant Response:
+
+ {(exchange as Response).parts.map((part, partIndex) => { + if (part.type === 'markdown') { + return ( + + ); + } + return null; + })} +
+
+ )} +
+ ))} +
+
+ ); +}; \ No newline at end of file diff --git a/src/webviews/components/preset.tsx b/src/webviews/components/preset.tsx index 6a35a85..e8aee3a 100644 --- a/src/webviews/components/preset.tsx +++ b/src/webviews/components/preset.tsx @@ -1,19 +1,19 @@ import { Provider, ProviderType } from '../../model'; import AnthropicLogo from '../assets/providers-logos/anthropic.svg'; -// import AWSBedrockLogo from '../assets/providers-logos/aws-bedrock.svg'; +import AWSBedrockLogo from '../assets/providers-logos/aws-bedrock.svg'; // import GeminiLogo from '../assets/providers-logos/gemini.svg'; // import OllamaLogo from '../assets/providers-logos/ollama.svg'; -// import OpenAILogo from '../assets/providers-logos/openai.svg'; +import OpenAILogo from '../assets/providers-logos/openai.svg'; import OpenRouterLogo from '../assets/providers-logos/open-router.svg'; import { cn } from 'utils/cn'; const logoMap = new Map>>(); logoMap.set(Provider.Anthropic, AnthropicLogo); -// logoMap.set(Provider.OpenAI, OpenAILogo); +logoMap.set(Provider.OpenAI, OpenAILogo); logoMap.set(Provider.OpenRouter, OpenRouterLogo); // logoMap.set(Provider.GoogleGemini, GeminiLogo); -// logoMap.set(Provider.AWSBedrock, AWSBedrockLogo); -// logoMap.set(Provider.OpenAICompatible, OpenAILogo); +logoMap.set(Provider.AWSBedrock, AWSBedrockLogo); +logoMap.set(Provider.OpenAICompatible, OpenAILogo); // logoMap.set(Provider.Ollama, OllamaLogo); export type PresetLogoProps = React.SVGProps & { diff --git a/src/webviews/routes.tsx b/src/webviews/routes.tsx index 08035f2..751e01d 100644 --- a/src/webviews/routes.tsx +++ b/src/webviews/routes.tsx @@ -5,6 +5,9 @@ import { createMemoryRouter, useLocation, useNavigate } from 'react-router-dom'; import { View, Event } from '../model'; import { loadSettings, SettingsView } from '@settings/settings-view'; import * as React from 'react'; +import { HistoryView } from './@history/history-view'; + +declare const vscode: any; export const router = createMemoryRouter( [ @@ -20,10 
+23,10 @@ export const router = createMemoryRouter( path: View.Task, element: , }, - // { - // path: View.History, - // element: , - // }, + { + path: View.History, + element: , + }, { path: View.Settings, loader: loadSettings, @@ -56,7 +59,7 @@ export function useNavigationFromExtension() { return () => { window.removeEventListener('message', handleMessage); }; - }, []); + }, [navigate]); // workaround to start a new task React.useEffect(() => {