Skip to content

Commit

Permalink
Python: updated reasoning samples (#10563)
Browse files Browse the repository at this point in the history
### Motivation and Context

<!-- Thank you for your contribution to the semantic-kernel repo!
Please help reviewers and future users by providing the following
information:
  1. Why is this change required?
  2. What problem does it solve?
  3. What scenario does it contribute to?
  4. If it fixes an open issue, please link to the issue here.
-->
A small update to the reasoning samples that leverages the latest updates
to those APIs.

Fixed a typing issue in filters.

### Description

<!-- Describe your changes, the overall approach, the underlying design.
These notes will help understanding how your code works. Thanks! -->

### Contribution Checklist

<!-- Before submitting this PR, please make sure: -->

- [x] The code builds clean without any errors or warnings
- [x] The PR follows the [SK Contribution
Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md)
and the [pre-submission formatting
script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts)
raises no violations
- [x] All unit tests pass, and I have added new tests where possible
- [x] I didn't break anyone 😄
  • Loading branch information
eavanvalkenburg authored Feb 19, 2025
1 parent 374d8f9 commit b8835d2
Show file tree
Hide file tree
Showing 2 changed files with 51 additions and 106 deletions.
41 changes: 18 additions & 23 deletions python/samples/concepts/reasoning/simple_reasoning.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,8 @@

import asyncio

from samples.concepts.setup.chat_completion_services import (
Services,
get_chat_completion_service_and_request_settings,
)
from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import (
from semantic_kernel.connectors.ai.open_ai import (
OpenAIChatCompletion,
OpenAIChatPromptExecutionSettings,
)
from semantic_kernel.contents import ChatHistory
Expand Down Expand Up @@ -59,17 +56,25 @@
Note: Unsupported features may be added in future updates.
"""

chat_completion_service, request_settings = get_chat_completion_service_and_request_settings(Services.OPENAI)
chat_service = OpenAIChatCompletion(service_id="reasoning", instruction_role="developer")
# Set the reasoning effort to "medium" and the maximum completion tokens to 5000.
request_settings = OpenAIChatPromptExecutionSettings(
service_id="reasoning", max_completion_tokens=2000, reasoning_effort="medium"
)


# Create a ChatHistory object
chat_history = ChatHistory()

# This is the system message that gives the chatbot its personality.
developer_message = """
As an assistant supporting the user,
you recognize all user input
as questions or consultations and answer them.
you recognize all user input
as questions or consultations and answer them.
"""

# Create a ChatHistory object
chat_history = ChatHistory()
# The developer message was newly introduced for reasoning models such as OpenAI’s o1 and o1-mini.
# `system message` cannot be used with reasoning models.
chat_history.add_developer_message(developer_message)


async def chat() -> bool:
Expand All @@ -86,25 +91,15 @@ async def chat() -> bool:
print("\n\nExiting chat...")
return False

# The developer message was newly introduced for reasoning models such as OpenAI’s o1 and o1-mini.
# `system message` cannot be used with reasoning models.
chat_history.add_developer_message(developer_message)
chat_history.add_user_message(user_input)

if not isinstance(request_settings, OpenAIChatPromptExecutionSettings):
raise ValueError("The OpenAI prompt execution settings are not supported for this sample.")

# Set the reasoning effort to "medium" and the maximum completion tokens to 5000.
request_settings.max_completion_tokens = 5000
request_settings.reasoning_effort = "medium"

# Get the chat message content from the chat completion service.
response = await chat_completion_service.get_chat_message_content(
response = await chat_service.get_chat_message_content(
chat_history=chat_history,
settings=request_settings,
)
if response:
print(f"Mosscap:> {response}")
print(f"Reasoning model:> {response}")

# Add the chat message to the chat history to keep track of the conversation.
chat_history.add_message(response)
Expand Down
116 changes: 33 additions & 83 deletions python/samples/concepts/reasoning/simple_reasoning_function_calling.py
Original file line number Diff line number Diff line change
@@ -1,22 +1,14 @@
# Copyright (c) Microsoft. All rights reserved.

import asyncio
from collections.abc import Awaitable, Callable

from samples.concepts.setup.chat_completion_services import (
Services,
get_chat_completion_service_and_request_settings,
)
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.function_calling_utils import (
kernel_function_metadata_to_function_call_format,
)
from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import (
OpenAIChatPromptExecutionSettings,
)
from semantic_kernel.connectors.ai import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, OpenAIChatPromptExecutionSettings
from semantic_kernel.contents import ChatHistory
from semantic_kernel.contents.function_call_content import FunctionCallContent
from semantic_kernel.contents.function_result_content import FunctionResultContent
from semantic_kernel.core_plugins.time_plugin import TimePlugin
from semantic_kernel.filters import AutoFunctionInvocationContext, FilterTypes

"""
# Reasoning Models Sample
Expand Down Expand Up @@ -70,25 +62,44 @@
Note: Unsupported features may be added in future updates.
"""

chat_completion_service, request_settings = get_chat_completion_service_and_request_settings(
Services.OPENAI, instruction_role="developer"

chat_service = OpenAIChatCompletion(service_id="reasoning", instruction_role="developer")
# Set the reasoning effort to "medium" and the maximum completion tokens to 5000.
# also set the function_choice_behavior to auto and that includes auto invoking the functions.
request_settings = OpenAIChatPromptExecutionSettings(
service_id="reasoning",
max_completion_tokens=5000,
reasoning_effort="medium",
function_choice_behavior=FunctionChoiceBehavior.Auto(),
)

# This is the system message that gives the chatbot its personality.
developer_message = """
As an assistant supporting the user,
you recognize all user input
as questions or consultations and answer them.
"""

# Create a ChatHistory object
chat_history = ChatHistory()
# The reasoning models use developer instead of system, but because we set the instruction_role to developer,
# we can use the system message as the developer message.
chat_history = ChatHistory(
system_message="""
As an assistant supporting the user,
you recognize all user input
as questions or consultations and answer them.
"""
)

# Create a kernel and register plugin.
kernel = Kernel()
kernel.add_plugin(TimePlugin(), "time")


# add a simple filter to track the function call result
@kernel.filter(filter_type=FilterTypes.AUTO_FUNCTION_INVOCATION)
async def auto_function_invocation_filter(
context: AutoFunctionInvocationContext, next: Callable[[AutoFunctionInvocationContext], Awaitable[None]]
) -> None:
await next(context)
print("Tools:> FUNCTION CALL RESULT")
print(f" - time: {context.function_result}")


async def chat() -> bool:
try:
user_input = input("User:> ")
Expand All @@ -103,78 +114,17 @@ async def chat() -> bool:
print("\n\nExiting chat...")
return False

# The developer message was newly introduced for reasoning models such as OpenAI’s o1 and o1-mini.
# `system message` cannot be used with reasoning models.
chat_history.add_developer_message(developer_message)
chat_history.add_user_message(user_input)

if not isinstance(request_settings, OpenAIChatPromptExecutionSettings):
raise ValueError(f"{type(request_settings).__name__} settings are not supported for this sample.")

# Set the reasoning effort to "medium" and the maximum completion tokens to 5000.
request_settings.max_completion_tokens = 5000
request_settings.reasoning_effort = "medium"

# enable the function calling and disable parallel tool calls for reasoning models.
request_settings.parallel_tool_calls = None
request_settings.tool_choice = None
request_settings.tools = [
kernel_function_metadata_to_function_call_format(f) for f in kernel.get_full_list_of_function_metadata()
]

# Get the chat message content from the chat completion service.
response = await chat_completion_service.get_chat_message_content(
response = await chat_service.get_chat_message_content(
chat_history=chat_history,
settings=request_settings,
kernel=kernel,
)

if not response:
return True

function_calls = [item for item in response.items if isinstance(item, FunctionCallContent)]
if len(function_calls) == 0:
print(f"Mosscap:> {response}")
chat_history.add_message(response)
return True

# Invoke the function calls and update the chat history with the results.
print(f"processing {len(function_calls)} tool calls")
await asyncio.gather(
*[
kernel.invoke_function_call(
function_call=function_call,
chat_history=chat_history,
function_call_count=len(function_calls),
request_index=0,
)
for function_call in function_calls
],
)

# Convert the last tool message to a user message.
fc_results = [item for item in chat_history.messages[-1].items if isinstance(item, FunctionResultContent)]

result_prompt: list[str] = ["FUNCTION CALL RESULT"]
for fc_result in fc_results:
result_prompt.append(f"- {fc_result.plugin_name}: {fc_result.result}")

chat_history.remove_message(chat_history.messages[-1])
chat_history.add_user_message("\n".join(result_prompt))
print("Tools:> ", "\n".join(result_prompt))

# Get the chat message content from the chat completion service.
request_settings.tools = None
response = await chat_completion_service.get_chat_message_content(
chat_history=chat_history,
settings=request_settings,
)

# Add the chat message to the chat history to keep track of the conversation.
if response:
print(f"Mosscap:> {response}")
chat_history.add_message(response)

return True


Expand Down

0 comments on commit b8835d2

Please sign in to comment.