9 changes: 9 additions & 0 deletions examples/.env.sample
@@ -0,0 +1,9 @@
# Azure AI Search Configuration
AZURE_SEARCH_SERVICE_NAME="your-search-service-name"
AZURE_SEARCH_INDEX_NAME="test-console-memories"
AZURE_SEARCH_API_KEY="your-search-api-key"
AZURE_SEARCH_ENDPOINT="https://your-search-service-name.search.windows.net"

# OpenAI Configuration (required for embeddings)
OPENAI_API_KEY="your-openai-api-key"
# OPENAI_ENDPOINT="your-custom-openai-endpoint" # Optional, uncomment if using Azure OpenAI
55 changes: 55 additions & 0 deletions examples/README.md
@@ -0,0 +1,55 @@
# Teams Memory Examples

This directory contains example applications demonstrating how to use the `teams_memory` package.

## Azure AI Search Console Test

The [`azure_search_console_test.py`](azure_search_console_test.py) script demonstrates how to use Azure AI Search storage functionality in the `teams_memory` package. It initializes a `MemoryCore` with Azure AI Search storage, adds sample memories, and retrieves them based on a query.

### Environment Variables

The example requires several environment variables to be set. You can set these variables in two ways:

#### Option 1: Using a `.env` file

1. Create a `.env` file in the `examples/` directory or in the project root.
2. Copy the contents from `.env.sample` and replace the placeholder values with your actual values:

```
# Azure AI Search Configuration
AZURE_SEARCH_SERVICE_NAME="your-search-service-name"
AZURE_SEARCH_INDEX_NAME="test-console-memories"
AZURE_SEARCH_API_KEY="your-search-api-key"
AZURE_SEARCH_ENDPOINT="https://your-search-service-name.search.windows.net"

# OpenAI Configuration (required for embeddings)
OPENAI_API_KEY="your-openai-api-key"
# OPENAI_ENDPOINT="your-custom-openai-endpoint" # Optional, uncomment if using Azure OpenAI
```

#### Option 2: Setting environment variables directly

You can also set the environment variables directly in your shell:

```bash
export AZURE_SEARCH_SERVICE_NAME="your-search-service-name"
export AZURE_SEARCH_INDEX_NAME="test-console-memories"
export AZURE_SEARCH_API_KEY="your-search-api-key"
export AZURE_SEARCH_ENDPOINT="https://your-search-service-name.search.windows.net"
export OPENAI_API_KEY="your-openai-api-key"
# export OPENAI_ENDPOINT="your-custom-openai-endpoint" # Optional, uncomment if using Azure OpenAI
```

### Running the Example

Once you've set up the environment variables, you can run the example:

```bash
python azure_search_console_test.py
```

The script will:
1. Initialize a `MemoryCore` with Azure AI Search storage
2. Add sample memories
3. Retrieve memories based on a query
4. Retrieve memories by topic
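
In essence, the script reduces to the following flow (a condensed sketch of the full `azure_search_console_test.py` shown below):

```python
import asyncio
import os

from teams_memory.config import LLMConfig, MemoryModuleConfig, StorageConfig
from teams_memory.core.memory_core import MemoryCore
from teams_memory.services.llm_service import LLMService


async def main():
    # Point the storage layer at Azure AI Search and the LLM layer at OpenAI
    config = MemoryModuleConfig(
        storage=StorageConfig(
            storage_type="azure-search",
            search_service_name=os.environ["AZURE_SEARCH_SERVICE_NAME"],
            search_index_name=os.environ["AZURE_SEARCH_INDEX_NAME"],
            search_api_key=os.environ["AZURE_SEARCH_API_KEY"],
            search_endpoint=os.environ["AZURE_SEARCH_ENDPOINT"],
        ),
        llm=LLMConfig(
            model="gpt-3.5-turbo",
            embedding_model="text-embedding-ada-002",
            api_key=os.environ.get("OPENAI_API_KEY"),
        ),
    )
    memory_core = MemoryCore(config, LLMService(config.llm))

    # Search stored memories by natural-language query and by topic
    by_query = await memory_core.search_memories(user_id="test-user-123", query="hiking")
    by_topic = await memory_core.search_memories(
        user_id="test-user-123", topic="General Facts about the user"
    )
    print(len(by_query), len(by_topic))


asyncio.run(main())
```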
184 changes: 184 additions & 0 deletions examples/azure_search_console_test.py
@@ -0,0 +1,184 @@
#!/usr/bin/env python
"""
Azure AI Search Memory Storage Test Console Application

This script demonstrates the use of Azure AI Search storage functionality
in the teams_memory package. It initializes a MemoryCore with Azure AI Search
storage, adds sample memories, and retrieves them based on a query.

Required environment variables:
- AZURE_SEARCH_SERVICE_NAME: The name of the Azure AI Search service
- AZURE_SEARCH_INDEX_NAME: The name of the search index (e.g., "test-console-memories")
- AZURE_SEARCH_API_KEY: The API key for the Azure AI Search service
- AZURE_SEARCH_ENDPOINT: The endpoint URL for the Azure AI Search service
- OPENAI_API_KEY: The OpenAI API key (required for embeddings)
"""
import asyncio
import datetime
import os
from pathlib import Path
from typing import Optional

from dotenv import load_dotenv

from teams_memory.config import LLMConfig, MemoryModuleConfig, StorageConfig
from teams_memory.core.memory_core import MemoryCore
from teams_memory.interfaces.types import BaseMemoryInput, MemoryType, TextEmbedding
from teams_memory.services.llm_service import LLMService

# Try to load environment variables from .env files
# First check in the examples directory, then in the project root
env_paths = [
    Path(__file__).parent / ".env",  # examples/.env
    Path(__file__).parent.parent / ".env",  # .env in project root
]

env_loaded = False
for env_path in env_paths:
    if env_path.exists():
        load_dotenv(env_path)
        print(f"Loaded environment variables from {env_path}")
        env_loaded = True
        break

if not env_loaded:
    print("No .env file found. Using environment variables from the shell.")



async def add_sample_memory(
    memory_core: MemoryCore,
    content: str,
    user_id: str,
    topics: Optional[list[str]] = None,
) -> Optional[str]:
    """Add a sample memory to the memory core."""
    print(f"Adding memory: {content}")

    memory = BaseMemoryInput(
        content=content,
        created_at=datetime.datetime.now(),
        user_id=user_id,
        memory_type=MemoryType.SEMANTIC,
        topics=topics or [],
    )

    # Generate an embedding for the memory content via the LLM service
    embedding_response = await memory_core.lm.embedding(input=[content])
    embedding_vector = embedding_response.data[0]["embedding"]
    embedding = TextEmbedding(text=content, embedding_vector=embedding_vector)

    memory_id = await memory_core.storage.store_memory(memory, embedding_vectors=[embedding])
    if memory_id:
        print(f"Memory added with ID: {memory_id}")
    else:
        print("Failed to add memory")
    return memory_id


async def main():
    """Main function to run the Azure AI Search memory test."""
    # Check for required environment variables
    required_vars = [
        "AZURE_SEARCH_SERVICE_NAME",
        "AZURE_SEARCH_INDEX_NAME",
        "AZURE_SEARCH_API_KEY",
        "AZURE_SEARCH_ENDPOINT",
    ]

    missing_vars = [var for var in required_vars if not os.environ.get(var)]
    if missing_vars:
        print("Error: The following required environment variables are not set:")
        for var in missing_vars:
            print(f" - {var}")
        print("\nPlease set these environment variables and try again.")
        print("You can either:")
        print("1. Set them directly in your shell:")
        print(' export AZURE_SEARCH_SERVICE_NAME="your-search-service-name"')
        print(' export AZURE_SEARCH_INDEX_NAME="test-console-memories"')
        print(' export AZURE_SEARCH_API_KEY="your-search-api-key"')
        print(' export AZURE_SEARCH_ENDPOINT="https://your-search-service-name.search.windows.net"')
        print("\n2. Create a .env file in the examples/ directory or project root with the following content:")
        print(' AZURE_SEARCH_SERVICE_NAME="your-search-service-name"')
        print(' AZURE_SEARCH_INDEX_NAME="test-console-memories"')
        print(' AZURE_SEARCH_API_KEY="your-search-api-key"')
        print(' AZURE_SEARCH_ENDPOINT="https://your-search-service-name.search.windows.net"')
        print(' OPENAI_API_KEY="your-openai-api-key"  # Required for embeddings')
        return

print("Initializing Azure Search Memory...")

# Configure the memory module
config = MemoryModuleConfig(
storage=StorageConfig(
storage_type="azure-search",
search_service_name=os.environ["AZURE_SEARCH_SERVICE_NAME"],
search_index_name=os.environ["AZURE_SEARCH_INDEX_NAME"],
search_api_key=os.environ["AZURE_SEARCH_API_KEY"],
search_endpoint=os.environ["AZURE_SEARCH_ENDPOINT"],
),
llm=LLMConfig(
model="gpt-3.5-turbo", # Default model for completions
embedding_model="text-embedding-ada-002", # Default model for embeddings
api_key=os.environ.get("OPENAI_API_KEY"), # Get OpenAI API key from environment
api_base=os.environ.get("OPENAI_ENDPOINT"), # Get OpenAI endpoint from environment if set
),
enable_logging=True, # Enable logging for better visibility
)

# Initialize LLM service and memory core
llm_service = LLMService(config.llm)
memory_core = MemoryCore(config, llm_service)

print("Azure Search Memory initialized successfully.")

    # Add sample memories
    user_id = "test-user-123"

    await add_sample_memory(
        memory_core,
        "The user enjoys hiking in the mountains on weekends.",
        user_id,
        ["General Interests and Preferences"],
    )

    await add_sample_memory(
        memory_core,
        "The user lives in Seattle and works as a software engineer.",
        user_id,
        ["General Facts about the user"],
    )

    await add_sample_memory(
        memory_core,
        "The user has a dog named Max that they adopted last year.",
        user_id,
        ["General Facts about the user"],
    )

    # Retrieve memories based on a query
    query = "hiking"
    print(f"\nRetrieving memories for query: '{query}'")

    memories = await memory_core.search_memories(user_id=user_id, query=query)

    if memories:
        print(f"Found {len(memories)} memories:")
        for i, memory in enumerate(memories, 1):
            print(f"{i}. {memory.content} (ID: {memory.id})")
    else:
        print("No memories found for the query.")

    # Retrieve memories by topic
    topic = "General Facts about the user"
    print(f"\nRetrieving memories for topic: '{topic}'")

    memories = await memory_core.search_memories(user_id=user_id, topic=topic)

    if memories:
        print(f"Found {len(memories)} memories:")
        for i, memory in enumerate(memories, 1):
            print(f"{i}. {memory.content} (ID: {memory.id})")
    else:
        print("No memories found for the topic.")


if __name__ == "__main__":
    asyncio.run(main())
18 changes: 18 additions & 0 deletions packages/teams_memory/CHANGELOG.md
@@ -59,3 +59,21 @@
- Added Tech Support Assistant sample and memory confirmation with citations.
- Improved deduplication and similarity calculations (cosine distance).
- Added evals for memory extraction and retrieval.

## [Unreleased]

### Added

- Azure AI Search storage option for enhanced memory retrieval
  - Support for vector search with the HNSW algorithm
  - Hybrid search capabilities combining vector similarity with traditional keyword search
  - Semantic ranking for improved relevance
  - Managed service benefits with automatic scaling and high availability
  - Support for both API key and Azure managed identity authentication
50 changes: 46 additions & 4 deletions packages/teams_memory/README.md
@@ -19,6 +19,11 @@ The Teams Memory Module is a simple yet powerful library designed to help manage
- **Query-Based or Topic-Based Memory Retrieval**:
Search for existing memories using natural language queries or predefined topics.

- **Multiple Storage Options**:
  - In-memory storage for development and testing
  - SQLite with vector extensions for small to medium deployments
  - Azure AI Search for enterprise-scale deployments with advanced vector search capabilities (see the sketch below)
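
Under the hood, the Azure AI Search option builds on the service's native vector search. Purely as an illustration of what that involves (the package manages its own index schema; the field names and client calls below are hypothetical, assuming `azure-search-documents>=11.4.0` as pinned in `pyproject.toml`), an HNSW-backed index and a hybrid query look roughly like this:

```python
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.indexes.models import (
    HnswAlgorithmConfiguration,
    SearchField,
    SearchFieldDataType,
    SearchIndex,
    SimpleField,
    VectorSearch,
    VectorSearchProfile,
)
from azure.search.documents.models import VectorizedQuery

endpoint = "https://your-search-service-name.search.windows.net"
credential = AzureKeyCredential("your-api-key")

# Hypothetical index layout: a key, searchable text, and an HNSW-indexed vector field
index = SearchIndex(
    name="teams-memories",
    fields=[
        SimpleField(name="id", type=SearchFieldDataType.String, key=True),
        SearchField(name="content", type=SearchFieldDataType.String, searchable=True),
        SearchField(
            name="embedding",
            type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
            searchable=True,
            vector_search_dimensions=1536,  # text-embedding-ada-002 output size
            vector_search_profile_name="memory-profile",
        ),
    ],
    vector_search=VectorSearch(
        algorithms=[HnswAlgorithmConfiguration(name="memory-hnsw")],
        profiles=[
            VectorSearchProfile(
                name="memory-profile",
                algorithm_configuration_name="memory-hnsw",
            )
        ],
    ),
)
SearchIndexClient(endpoint, credential).create_or_update_index(index)

# Hybrid query: keyword relevance and vector similarity in a single request
search_client = SearchClient(endpoint, "teams-memories", credential)
results = search_client.search(
    search_text="hiking",  # keyword half of the hybrid query
    vector_queries=[
        VectorizedQuery(vector=[0.1] * 1536, k_nearest_neighbors=5, fields="embedding")
    ],
)
```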

# Integration

Integrating the Memory Module into your Teams AI SDK application (or Bot Framework) is straightforward.
@@ -28,6 +33,10 @@ Integrating the Memory Module into your Teams AI SDK application (or Bot Framewo
- **Azure OpenAI or OpenAI Keys**:
The LLM layer is built using [LiteLLM](https://docs.litellm.ai/), which supports multiple [providers](https://docs.litellm.ai/docs/providers). However, only Azure OpenAI (AOAI) and OpenAI (OAI) have been tested.

- **For Azure AI Search Storage**:
  - An Azure AI Search service instance
  - Either an API key or Azure managed identity credentials

## Integrating into a Teams AI SDK Application

### Installing the Memory Module
@@ -45,19 +54,20 @@ Memory extraction requires incoming and outgoing messages to your application. T
After building your bot `Application`, create a `MemoryMiddleware` with the following configurations:

- **`llm`**: Configuration for the LLM (required).
- **`storage`**: Configuration for the storage layer. Defaults to `InMemoryStorage` if not provided.
- **`storage`**: Configuration for the storage layer. Options include:
  - **In-memory storage** (default): No configuration needed
  - **SQLite storage**: Provide a `db_path`
  - **Azure AI Search storage**: Provide Azure AI Search configuration
- **`buffer_size`**: Minimum size of the message buffer before memory extraction is triggered.
- **`timeout_seconds`**: Time elapsed after the buffer starts filling up before extraction occurs.
- **Note**: Extraction occurs when either the `buffer_size` is reached or the `timeout_seconds` elapses, whichever happens first.
- **`topics`**: Topics relevant to your application. These help the LLM focus on important information and avoid unnecessary extractions.

```python
# Example with in-memory storage (default)
memory_middleware = MemoryMiddleware(
config=MemoryModuleConfig(
llm=LLMConfig(**memory_llm_config),
storage=StorageConfig(
db_path=os.path.join(os.path.dirname(__file__), "data", "memory.db")
), # Uses SQLite if `db_path` is provided
timeout_seconds=60, # Extraction occurs 60 seconds after the first message
enable_logging=True, # Helpful for debugging
topics=[
Expand All @@ -67,6 +77,38 @@ memory_middleware = MemoryMiddleware(
], # Example topics for a tech-assistant agent
)
)
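
# Example tuning the extraction trigger (illustrative values; assumes
# `buffer_size` is set on `MemoryModuleConfig` like the other options above).
# Extraction runs when either `buffer_size` messages have accumulated or
# `timeout_seconds` have elapsed, whichever comes first.
memory_middleware = MemoryMiddleware(
    config=MemoryModuleConfig(
        llm=LLMConfig(**memory_llm_config),
        buffer_size=10,  # assumed value: extract once 10 messages are buffered
        timeout_seconds=60,  # or 60 seconds after the first buffered message
        topics=[...],
    )
)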

# Example with SQLite storage
memory_middleware = MemoryMiddleware(
    config=MemoryModuleConfig(
        llm=LLMConfig(**memory_llm_config),
        storage=StorageConfig(
            db_path=os.path.join(os.path.dirname(__file__), "data", "memory.db")
        ),  # Uses SQLite if `db_path` is provided
        timeout_seconds=60,
        enable_logging=True,
        topics=[...],
    )
)

# Example with Azure AI Search storage
memory_middleware = MemoryMiddleware(
    config=MemoryModuleConfig(
        llm=LLMConfig(**memory_llm_config),
        storage=StorageConfig(
            storage_type="azure-search",  # Explicitly set the storage type
            search_service_name="your-search-service-name",  # Required
            search_index_name="teams-memories",  # Optional, defaults to "teams-memories"
            search_api_key="your-api-key",  # Optional, uses DefaultAzureCredential if not provided
            search_api_version="2023-07-01-Preview",  # Optional
            search_endpoint=None,  # Optional, constructs a default endpoint if not provided
        ),
        timeout_seconds=60,
        enable_logging=True,
        topics=[...],
    )
)

bot_app.adapter.use(memory_middleware)
```
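
To authenticate with Azure managed identity instead of an API key, omit `search_api_key`; as the comment in the example above notes, the storage layer then falls back to `DefaultAzureCredential`. A minimal sketch:

```python
from teams_memory.config import StorageConfig

# search_api_key omitted: authentication falls back to DefaultAzureCredential
storage = StorageConfig(
    storage_type="azure-search",
    search_service_name="your-search-service-name",
    search_index_name="teams-memories",
)
```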

2 changes: 2 additions & 0 deletions packages/teams_memory/pyproject.toml
@@ -18,6 +18,8 @@ dependencies = [
"litellm==1.54.1",
"botbuilder>=0.0.1",
"botframework-connector>=4.16.2",
"azure-search-documents>=11.4.0",
"azure-identity>=1.15.0",
]

[project.urls]