Skip to content

Commit

Permalink
working templating with handlebars for both samples 1 and 2
Browse files Browse the repository at this point in the history
  • Loading branch information
eavanvalkenburg committed Nov 9, 2023
1 parent db4c9e4 commit f604287
Show file tree
Hide file tree
Showing 11 changed files with 198 additions and 148 deletions.
11 changes: 6 additions & 5 deletions .vscode/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,10 @@
"editor.formatOnType": true,
"editor.formatOnSave": true,
"editor.formatOnPaste": true,
"editor.defaultFormatter": "charliermarsh.ruff",
"python.formatting.provider": "charliermarsh.ruff",
"python.formatting.autopep8Args": [
"--max-line-length=160"
]
"[python]": {
"editor.defaultFormatter": "charliermarsh.ruff",
"editor.codeActionsOnSave": {
"source.organizeImports.ruff": true
}
}
}
3 changes: 2 additions & 1 deletion python/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,4 +3,5 @@ pyyaml
pydantic
jinja2
pybars3
ruff
ruff
black
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@ template: |
{{/message}}
{{#each messages}}
{{#message role=Role}}
{{~Content~}}
{{#message role=role}}
{{~content~}}
{{/message}}
{{/each}}
template_format: handlebars
Expand All @@ -16,9 +16,9 @@ input_variables:
type: ChatHistory
description: The history of the chat.
is_required: true
output_variables:
output_variable:
name: assistant_response
type: str
type: string
description: The response from the assistant.
execution_settings:
- model_id_pattern: ^gpt-4
Expand Down
14 changes: 5 additions & 9 deletions python/samples/01-SimpleChat/run.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,14 @@
import asyncio
import os
import sys
import asyncio

from semantic_kernel.utils.settings import azure_openai_settings_from_dot_env_as_dict

# to allow the strange structure and the import of the new pieces
sys.path.append(os.getcwd())
from python.src.azure_chat_completion import RESPONSE_OBJECT_KEY, AzureChatCompletion
from python.src.kernel import newKernel as Kernel
from python.src.sk_function import SKFunction
from python.src.azure_chat_completion import AzureChatCompletion, RESPONSE_OBJECT_KEY


async def runner():
Expand All @@ -25,21 +26,16 @@ async def runner():

# create chat_history
chat_history = gpt35turbo.create_new_chat()
chat_history.add_system_message("Hello! I am a robot.")
chat_history.add_user_message("Hello! I am a human.")


# loop with input
while True:
user_input = input("User:> ")
if user_input == "exit":
break
chat_history.add_user_message(user_input)

# get response
response = await kernel.run_async(
chat_function,
variables={"messages": chat_history},
service=gpt35turbo
chat_function, variables={"messages": chat_history}, service=gpt35turbo
)

# print response
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@ template: |
{{/message}}
{{#each messages}}
{{#message role=Role}}
{{~Content~}}
{{#message role=role}}
{{~content~}}
{{/message}}
{{/each}}
template_format: handlebars
Expand Down
18 changes: 10 additions & 8 deletions python/samples/02-PersonaChat/run.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,14 @@
import asyncio
import os
import sys
import asyncio

from semantic_kernel.utils.settings import azure_openai_settings_from_dot_env_as_dict

# to allow the strange structure and the import of the new pieces
sys.path.append(os.getcwd())
from python.src.azure_chat_completion import RESPONSE_OBJECT_KEY, AzureChatCompletion
from python.src.kernel import newKernel as Kernel
from python.src.sk_function import SKFunction
from python.src.azure_chat_completion import AzureChatCompletion, RESPONSE_OBJECT_KEY


async def runner():
Expand All @@ -17,17 +18,15 @@ async def runner():
)
chat_function = SKFunction.from_yaml(
os.getcwd()
+ "/python/samples/01-SimpleChat/plugins/ChatPlugin/SimpleChat.prompt.yaml"
+ "/python/samples/02-PersonaChat/plugins/ChatPlugin/PersonaChat.prompt.yaml"
)

# create kernel
kernel = Kernel(ai_services=[gpt35turbo])

# create chat_history
chat_history = gpt35turbo.create_new_chat()
chat_history.add_system_message("Hello! I am a robot.")
chat_history.add_user_message("Hello! I am a human.")


# loop with input
while True:
user_input = input("User:> ")
Expand All @@ -38,8 +37,11 @@ async def runner():
# get response
response = await kernel.run_async(
chat_function,
variables={"messages": chat_history},
service=gpt35turbo
variables={
"persona": "You are a snarky (yet helpful) teenage assistant. Make sure to use hip slang in every response.",
"messages": chat_history,
},
service=gpt35turbo,
)

# print response
Expand Down
76 changes: 29 additions & 47 deletions python/src/azure_chat_completion.py
Original file line number Diff line number Diff line change
@@ -1,45 +1,13 @@
from typing import Any, Final

import openai
from semantic_kernel.connectors.ai import ChatCompletionClientBase
from semantic_kernel.sk_pydantic import SKBaseModel
from semantic_kernel.connectors.ai.open_ai.models.chat.open_ai_chat_message import (
OpenAIChatMessage,
)
from semantic_kernel.skill_definition.parameter_view import ParameterView as Parameter
from semantic_kernel.connectors.ai.open_ai.models.chat.function_call import FunctionCall
from semantic_kernel.connectors.ai import ChatCompletionClientBase

RESPONSE_OBJECT_KEY: Final = "response_object"

class OpenAIChatHistory(SKBaseModel):
messages: list[OpenAIChatMessage] = []
from .openai_chat_history import OpenAIChatHistory

def add_user_message(self, message: str):
self.messages.append(OpenAIChatMessage(role="user", fixed_content=message))

def add_assistant_message(
self, message: str | None = None, function_call: FunctionCall | None = None
):
self.messages.append(OpenAIChatMessage(role="assistant", fixed_content=message))

def add_system_message(self, message: str):
self.messages.append(OpenAIChatMessage(role="system", fixed_content=message))

def add_function_call_response(self, function_name: str, result: str):
self.messages.append(
OpenAIChatMessage(
role="function_call", fixed_content=result, name=function_name
)
)

def add_openai_response(self, response: Any):
if 'choices' in response:
message = response.choices[0].message
self.messages.append(OpenAIChatMessage(**message))
else:
self.messages.append(OpenAIChatMessage(**response))

def __iter__(self) -> iter:
return self.messages.__iter__()
RESPONSE_OBJECT_KEY: Final = "response_object"


class AzureChatCompletion(SKBaseModel, ChatCompletionClientBase):
Expand All @@ -54,36 +22,46 @@ def create_new_chat(self):

async def complete_chat_async(
self,
chat_history: OpenAIChatHistory,
rendered_template: str,
request_settings: dict,
output_variables: list[Parameter] = None,
**kwargs,
) -> dict:
chat_history = OpenAIChatHistory.from_prompt(rendered_template)
response = await self._send_chat_request(
chat_history, request_settings, None
chat_history, request_settings, functions=None, **kwargs
)
if request_settings.get('stream', False):
if request_settings.get("stream", False):
return {RESPONSE_OBJECT_KEY: response}
result_key = output_variables[0].name if output_variables else 'result'
res = {result_key: response.choices[0].message.content, RESPONSE_OBJECT_KEY: response}
result_key = output_variables[0].name if output_variables else "result"
res = {
result_key: response.choices[0].message.content,
RESPONSE_OBJECT_KEY: response,
}
return res

async def complete_chat_stream_async(
self,
chat_history: OpenAIChatHistory,
rendered_template: str,
request_settings: dict,
output_variables: list[Parameter] = None,
**kwargs,
) -> dict:
return await self.complete_chat_async(chat_history, request_settings, output_variables)

return await self.complete_chat_async(
rendered_template, request_settings, output_variables, **kwargs
)

async def complete_chat_with_functions_async(
self,
chat_history: OpenAIChatHistory,
rendered_template: str,
functions: list[dict],
request_settings: dict,
**kwargs,
) -> dict:
request_settings['stream'] = False
chat_history = OpenAIChatHistory.from_prompt(rendered_template)
request_settings["stream"] = False
response = await self._send_chat_request(
chat_history, request_settings, functions
chat_history, request_settings, functions, **kwargs
)
return {"result": response.choices[0].message, RESPONSE_OBJECT_KEY: response}

Expand All @@ -92,6 +70,7 @@ async def _send_chat_request(
chat_history: OpenAIChatHistory,
request_settings: dict,
functions: list[dict] = None,
**kwargs,
):
messages = [message.as_dict() for message in chat_history]

Expand All @@ -115,6 +94,9 @@ async def _send_chat_request(
model_args["function_call"] = request_settings["function_call"]
model_args["functions"] = functions

if kwargs:
model_args.update(kwargs)

response: Any = await openai.ChatCompletion.acreate(**model_args)
return response

Expand Down
28 changes: 28 additions & 0 deletions python/src/handlebars_prompt_template_handler.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
from typing import Any

from pybars import Compiler
from pydantic import PrivateAttr
from semantic_kernel.sk_pydantic import SKBaseModel


def _message(this, options, **kwargs):
# single message call, scope is messages object as context
# in messages loop, scope is ChatMessage object as context
if "role" in kwargs:
return f'<message role="{kwargs["role"]}">{options["fn"](this)}</message>'


# TODO: render functions are helpers


class HandleBarsPromptTemplateHandler(SKBaseModel):
    """Renders handlebars prompt templates.

    The template string is compiled once at construction time so that
    every ``render`` call is just an invocation of the compiled template.
    """

    # Raw handlebars template source.
    template: str
    # Compiled template callable produced by pybars; pydantic private
    # attribute because it is derived state, not model input.
    _template_compiler: Any = PrivateAttr()

    def __init__(self, template: str):
        """Validate the model and eagerly compile *template*."""
        super().__init__(template=template)
        self._template_compiler = Compiler().compile(self.template)

    def render(self, variables: dict) -> str:
        """Render the compiled template with *variables*.

        The ``message`` helper is made available to the template so that
        chat messages can be emitted as ``<message role="...">`` tags.
        """
        helpers = {"message": _message}
        return self._template_compiler(variables, helpers=helpers)
25 changes: 18 additions & 7 deletions python/src/kernel.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,27 @@
from typing import Any
from .sk_function import SKFunction

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai import (
ChatCompletionClientBase,
TextCompletionClientBase,
EmbeddingGeneratorBase,
TextCompletionClientBase,
)

from .sk_function import SKFunction


class newKernel(Kernel):
plugins: list[SKFunction] = []
prompt_template_engine: Any = None

def __init__(self, ai_services: list, plugins: list | None = None, *args, **kwargs):
def __init__(
self,
ai_services: list,
plugins: list | None = None,
prompt_template_engine: Any = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
for service in ai_services:
if isinstance(service, ChatCompletionClientBase):
Expand All @@ -21,6 +31,7 @@ def __init__(self, ai_services: list, plugins: list | None = None, *args, **kwar
if isinstance(service, EmbeddingGeneratorBase):
self.add_text_embedding_generation_service(service.name, service)
self.plugins = plugins
self.prompt_template_engine = prompt_template_engine

async def run_async(
self,
Expand All @@ -44,12 +55,12 @@ async def run_async(
functions = [functions]
for function in functions:
if isinstance(function, SKFunction):
results.append(await function.run_async(
variables, service=service, **kwargs
))
results.append(
await function.run_async(variables, service=service, **kwargs)
)
else:
raise TypeError(
f"Expected a SKFunction, but got {type(function)} instead"
)
#TODO: apply post-hooks
# TODO: apply post-hooks
return results if len(results) > 1 else results[0]
Loading

0 comments on commit f604287

Please sign in to comment.