3 changes: 2 additions & 1 deletion .gitignore
@@ -18,7 +18,7 @@ venv.bak/
 .dmypy.json
 dmypy.json

-.copilot-instructions.md
+private/

 # other
 .DS_STORE
@@ -30,6 +30,7 @@ ref/
 py.typed
 CLAUDE.md

+.copilot-instructions.md
 .env.claude/
 .claude/

10 changes: 7 additions & 3 deletions examples/a2a-test/src/main.py
@@ -180,7 +180,11 @@ async def location_handler(params: LocationParams) -> str:
             parts=[Part(root=TextPart(kind="text", text="Please provide a location"))],
         )
     else:
-        return result.response.content if result.response.content else "No weather information available."
+        return (
+            result.response.content
+            if result.response and result.response.content
+            else "No weather information available."
+        )


 # A2A Server Message Event Handler
@@ -206,7 +210,7 @@ async def handle_a2a_message(message: A2AMessageEvent) -> None:
     await respond(result)


-async def handler(message: str) -> ModelMessage:
+async def handler(message: str) -> ModelMessage | None:
     # Now we can send the message to the prompt and it will decide if
     # the a2a agent should be used or not and also manages contacting the agent
     result = await prompt.send(message)
@@ -219,7 +223,7 @@ async def handle_message(ctx: ActivityContext[MessageActivity]):
     await ctx.reply(TypingActivityInput())

     result = await handler(ctx.activity.text)
-    if result.content:
+    if result and result.content:
         await ctx.send(result.content)

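Reviewer note: the guard pattern introduced here (check the result, then the response, then its content) repeats across the example apps below. A small helper of this shape would collapse the checks; the helper is hypothetical and not part of this PR:

from microsoft.teams.ai import ChatSendResult


def content_or(result: ChatSendResult | None, fallback: str) -> str:
    # Hypothetical helper, not part of this PR: returns the response
    # text, or the fallback when the result, the response, or its
    # content is missing.
    if result and result.response and result.response.content:
        return result.response.content
    return fallback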
4 changes: 2 additions & 2 deletions examples/ai-test/src/handlers/function_calling.py
@@ -68,7 +68,7 @@ async def handle_pokemon_search(model: AIModel, ctx: ActivityContext[MessageActivity]):
         input=ctx.activity.text, instructions="You are a helpful assistant that can look up Pokemon for the user."
     )

-    if chat_result.response.content:
+    if chat_result.response and chat_result.response.content:
         message = MessageActivityInput(text=chat_result.response.content).add_ai_generated()
         await ctx.send(message)
     else:
@@ -129,7 +129,7 @@ async def handle_multiple_functions(model: AIModel, ctx: ActivityContext[MessageActivity]):
         ),
     )

-    if chat_result.response.content:
+    if chat_result.response and chat_result.response.content:
         message = MessageActivityInput(text=chat_result.response.content).add_ai_generated()
         await ctx.send(message)
     else:
2 changes: 1 addition & 1 deletion examples/ai-test/src/handlers/memory_management.py
@@ -39,7 +39,7 @@ async def handle_stateful_conversation(model: AIModel, ctx: ActivityContext[MessageActivity]):
         input=ctx.activity.text, instructions="You are a helpful assistant that remembers our previous conversation."
     )

-    if chat_result.response.content:
+    if chat_result.response and chat_result.response.content:
         message = MessageActivityInput(text=chat_result.response.content).add_ai_generated()
         await ctx.send(message)
     else:
6 changes: 3 additions & 3 deletions examples/ai-test/src/main.py
@@ -68,7 +68,7 @@ async def handle_simple_chat(ctx: ActivityContext[MessageActivity]):
         input=ctx.activity.text, instructions="You are a friendly assistant who talks like a pirate"
     )

-    if chat_result.response.content:
+    if chat_result.response and chat_result.response.content:
         message = MessageActivityInput(text=chat_result.response.content).add_ai_generated()
         await ctx.send(message)

@@ -107,7 +107,7 @@ async def handle_streaming(ctx: ActivityContext[MessageActivity]):

     if hasattr(ctx.activity.conversation, "is_group") and ctx.activity.conversation.is_group:
         # Group chat - send final response
-        if chat_result.response.content:
+        if chat_result.response and chat_result.response.content:
             message = MessageActivityInput(text=chat_result.response.content).add_ai_generated()
             await ctx.send(message)
     else:
@@ -167,7 +167,7 @@ async def handle_feedback_demo(ctx: ActivityContext[MessageActivity]):
         input="Tell me a short joke", instructions="You are a comedian. Keep responses brief and funny."
     )

-    if chat_result.response.content:
+    if chat_result.response and chat_result.response.content:
         # Create message with feedback enabled and initialize storage
         message = MessageActivityInput(text=chat_result.response.content).add_ai_generated().add_feedback()
         sent_message = await ctx.send(message)
6 changes: 3 additions & 3 deletions examples/mcp-client/src/main.py
@@ -88,7 +88,7 @@ async def handle_agent_chat(ctx: ActivityContext[MessageActivity]):

     # Use ChatPrompt with MCP tools (stateful conversation)
     result = await responses_prompt.send(query)
-    if result.response.content:
+    if result.response and result.response.content:
         message = MessageActivityInput(text=result.response.content).add_ai_generated()
         await ctx.send(message)

@@ -111,7 +111,7 @@ async def handle_prompt_chat(ctx: ActivityContext[MessageActivity]):
         ),
     )

-    if result.response.content:
+    if result.response and result.response.content:
         message = MessageActivityInput(text=result.response.content).add_ai_generated()
         await ctx.send(message)

@@ -157,7 +157,7 @@ async def handle_fallback_message(ctx: ActivityContext[MessageActivity]):

     # Use ChatPrompt with MCP tools for general conversation
     result = await responses_prompt.send(ctx.activity.text)
-    if result.response.content:
+    if result.response and result.response.content:
         message = MessageActivityInput(text=result.response.content).add_ai_generated()
         await ctx.send(message)

20 changes: 18 additions & 2 deletions packages/ai/src/microsoft/teams/ai/__init__.py
@@ -3,11 +3,21 @@
 Licensed under the MIT License.
 """

+from . import plugins, utils
 from .ai_model import AIModel
 from .chat_prompt import ChatPrompt, ChatSendResult
-from .function import Function, FunctionCall, FunctionHandler, FunctionHandlers, FunctionHandlerWithNoParams
+from .function import (
+    DeferredResult,
+    Function,
+    FunctionCall,
+    FunctionHandler,
+    FunctionHandlers,
+    FunctionHandlerWithNoParams,
+)
 from .memory import ListMemory, Memory
-from .message import FunctionMessage, Message, ModelMessage, SystemMessage, UserMessage
+from .message import DeferredMessage, FunctionMessage, Message, ModelMessage, SystemMessage, UserMessage
 from .plugin import AIPluginProtocol, BaseAIPlugin
+from .utils import *  # noqa: F401, F403

 __all__ = [
     "ChatSendResult",
@@ -17,12 +27,18 @@
     "ModelMessage",
     "SystemMessage",
     "FunctionMessage",
+    "DeferredMessage",
     "Function",
     "FunctionCall",
+    "DeferredResult",
     "Memory",
     "ListMemory",
     "AIModel",
     "AIPluginProtocol",
     "BaseAIPlugin",
     "FunctionHandler",
     "FunctionHandlerWithNoParams",
     "FunctionHandlers",
 ]
+__all__.extend(utils.__all__)
+__all__.extend(plugins.__all__)
70 changes: 70 additions & 0 deletions packages/ai/src/microsoft/teams/ai/agent.py
@@ -0,0 +1,70 @@
"""
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
"""

from typing import Any, Awaitable, Callable

from microsoft.teams.ai.plugin import AIPluginProtocol

from .ai_model import AIModel
from .chat_prompt import ChatPrompt, ChatSendResult
from .function import Function
from .memory import ListMemory, Memory
from .message import Message, SystemMessage


class Agent(ChatPrompt):
"""
A stateful implementation of ChatPrompt with persistent memory.

Agent extends ChatPrompt by providing default memory management,
making it easier to maintain conversation context across multiple
interactions without manually passing memory each time.
"""

def __init__(
self,
model: AIModel,
*,
memory: Memory | None = None,
functions: list[Function[Any]] | None = None,
plugins: list[AIPluginProtocol] | None = None,
):
"""
Initialize Agent with model and persistent memory.

Args:
model: AI model implementation for text generation
memory: Memory for conversation persistence. Defaults to InMemory ListMemory
functions: Optional list of functions the model can call
plugins: Optional list of plugins for extending functionality
"""
super().__init__(model, functions=functions, plugins=plugins)
self.memory = memory or ListMemory()

async def send(
self,
input: str | Message | None,
*,
instructions: str | SystemMessage | None = None,
memory: Memory | None = None,
on_chunk: Callable[[str], Awaitable[None]] | Callable[[str], None] | None = None,
) -> ChatSendResult:
"""
Send a message using the agent's persistent memory.

Args:
input: Message to send (string will be converted to UserMessage)
instructions: Optional system message to guide model behavior
memory: Optional memory override. Defaults to agent's persistent memory
on_chunk: Optional callback for streaming response chunks

Returns:
ChatSendResult containing the final model response

Note:
If no memory is provided, uses the agent's default memory,
making conversation state persistent across calls.
"""
return await super().send(input, memory=memory or self.memory, instructions=instructions, on_chunk=on_chunk)
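Reviewer note: for anyone trying the new class out, here is a minimal usage sketch. The model class is a stand-in (any AIModel implementation works), and Agent is imported from its module directly since the hunks above do not show it being re-exported from the package root:

import asyncio

from microsoft.teams.ai import ListMemory
from microsoft.teams.ai.agent import Agent  # direct module import; root re-export not shown above

from my_models import EchoModel  # assumed stand-in for any AIModel implementation


async def main() -> None:
    # memory is optional; Agent falls back to a fresh ListMemory()
    agent = Agent(EchoModel(), memory=ListMemory())

    # Both turns share the agent's memory, so the second turn can
    # refer back to the first without passing memory explicitly.
    await agent.send("My name is Ada.")
    result = await agent.send("What is my name?")
    if result.response and result.response.content:
        print(result.response.content)


asyncio.run(main())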
6 changes: 3 additions & 3 deletions packages/ai/src/microsoft/teams/ai/ai_model.py
@@ -9,7 +9,7 @@

 from .function import Function
 from .memory import Memory
-from .message import Message, ModelMessage, SystemMessage
+from .message import DeferredMessage, Message, ModelMessage, SystemMessage


 class AIModel(Protocol):
@@ -23,13 +23,13 @@ class AIModel(Protocol):

     async def generate_text(
         self,
-        input: Message,
+        input: Message | None,
         *,
         system: SystemMessage | None = None,
         memory: Memory | None = None,
         functions: dict[str, Function[BaseModel]] | None = None,
         on_chunk: Callable[[str], Awaitable[None]] | None = None,
-    ) -> ModelMessage:
+    ) -> ModelMessage | list[DeferredMessage]:
         """
         Generate a text response from the AI model.

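Reviewer note: this protocol change is the breaking piece of the PR. generate_text may now return a list of DeferredMessage instead of a ModelMessage, and input may now be None (presumably to support resuming a deferred call). A sketch of how a caller might branch on the union; the UserMessage constructor shape is an assumption:

from microsoft.teams.ai import AIModel, ModelMessage, UserMessage


async def run_turn(model: AIModel, text: str) -> str | None:
    result = await model.generate_text(UserMessage(content=text))

    # Common case: the model produced a final message.
    if isinstance(result, ModelMessage):
        return result.content

    # Otherwise result is list[DeferredMessage]: one or more function
    # calls were deferred, so there is no final text yet. How deferred
    # calls are resumed is not shown in this diff.
    return None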