Skip to content

Commit

Permalink
Add examples; update test files
Browse files Browse the repository at this point in the history
  • Loading branch information
mpangrazzi committed Jan 29, 2025
1 parent f00524d commit 990cb35
Show file tree
Hide file tree
Showing 6 changed files with 162 additions and 16 deletions.
50 changes: 50 additions & 0 deletions examples/chat_with_website/chat_with_website.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
# Haystack pipeline: answer a question about the contents of a set of web pages.
# Flow: fetcher (downloads each URL) -> converter (HTML -> Documents)
#       -> prompt (renders the LLM prompt) -> llm (OpenAI generator).
components:
  converter:
    # Converts the fetched HTML streams into Haystack Documents.
    type: haystack.components.converters.html.HTMLToDocument
    init_parameters:
      extraction_kwargs: null

  fetcher:
    # Downloads page content for every input URL; fails fast after 2 retries.
    init_parameters:
      raise_on_failure: true
      retry_attempts: 2
      timeout: 3
      user_agents:
      - haystack/LinkContentFetcher/2.0.0b8
    type: haystack.components.fetchers.link_content.LinkContentFetcher

  llm:
    init_parameters:
      api_base_url: null
      # The API key is resolved from the OPENAI_API_KEY environment variable;
      # strict: true makes pipeline loading fail if the variable is unset.
      api_key:
        env_vars:
        - OPENAI_API_KEY
        strict: true
        type: env_var
      generation_kwargs: {}
      model: gpt-4o-mini
      streaming_callback: null
      system_prompt: null
    type: haystack.components.generators.openai.OpenAIGenerator

  prompt:
    init_parameters:
      # Jinja2 template; `documents` comes from the converter, `query` from the caller.
      template: |
        "According to the contents of this website:
        {% for document in documents %}
        {{document.content}}
        {% endfor %}
        Answer the given question: {{query}}
        Answer:
        "
    type: haystack.components.builders.prompt_builder.PromptBuilder

connections:
- receiver: converter.sources
  sender: fetcher.streams
- receiver: prompt.documents
  sender: converter.documents
- receiver: llm.prompt
  sender: prompt.prompt

metadata: {}
30 changes: 30 additions & 0 deletions examples/chat_with_website/pipeline_wrapper.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
from pathlib import Path
from typing import Generator, List, Union
from haystack import Pipeline
from hayhooks.server.pipelines.utils import get_last_user_message
from hayhooks.server.utils.base_pipeline_wrapper import BasePipelineWrapper
from hayhooks.server.logger import log


# Default websites queried by run_chat (run_api takes its URLs from the request instead).
URLS = ["https://haystack.deepset.ai", "https://www.redis.io", "https://ssi.inc"]


class PipelineWrapper(BasePipelineWrapper):
    """Hayhooks wrapper for the chat-with-website pipeline (non-streaming variant)."""

    def setup(self) -> None:
        """Deserialize the pipeline from the YAML definition shipped next to this module."""
        yaml_path = Path(__file__).parent / "chat_with_website.yml"
        self.pipeline = Pipeline.loads(yaml_path.read_text())

    def run_api(self, urls: List[str], question: str) -> str:
        """Answer `question` using the contents fetched from the caller-supplied `urls`."""
        log.trace(f"Running pipeline with urls: {urls} and question: {question}")
        run_args = {"fetcher": {"urls": urls}, "prompt": {"query": question}}
        output = self.pipeline.run(run_args)
        # The generator yields a list of replies; the example returns the first one.
        return output["llm"]["replies"][0]

    def run_chat(self, model: str, messages: List[dict], body: dict) -> Union[str, Generator]:
        """Answer the last user message in `messages` against the fixed example URLS."""
        log.trace(f"Running pipeline with model: {model}, messages: {messages}, body: {body}")

        question = get_last_user_message(messages)
        log.trace(f"Question: {question}")

        # Plain pipeline run, will return a string
        run_args = {"fetcher": {"urls": URLS}, "prompt": {"query": question}}
        output = self.pipeline.run(run_args)
        return output["llm"]["replies"][0]
50 changes: 50 additions & 0 deletions examples/chat_with_website_streaming/chat_with_website.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
# Haystack pipeline: answer a question about the contents of a set of web pages.
# Used by the streaming example; identical to the non-streaming example's pipeline.
# Flow: fetcher (downloads each URL) -> converter (HTML -> Documents)
#       -> prompt (renders the LLM prompt) -> llm (OpenAI generator).
components:
  converter:
    # Converts the fetched HTML streams into Haystack Documents.
    type: haystack.components.converters.html.HTMLToDocument
    init_parameters:
      extraction_kwargs: null

  fetcher:
    # Downloads page content for every input URL; fails fast after 2 retries.
    init_parameters:
      raise_on_failure: true
      retry_attempts: 2
      timeout: 3
      user_agents:
      - haystack/LinkContentFetcher/2.0.0b8
    type: haystack.components.fetchers.link_content.LinkContentFetcher

  llm:
    init_parameters:
      api_base_url: null
      # The API key is resolved from the OPENAI_API_KEY environment variable;
      # strict: true makes pipeline loading fail if the variable is unset.
      api_key:
        env_vars:
        - OPENAI_API_KEY
        strict: true
        type: env_var
      generation_kwargs: {}
      model: gpt-4o-mini
      streaming_callback: null
      system_prompt: null
    type: haystack.components.generators.openai.OpenAIGenerator

  prompt:
    init_parameters:
      # Jinja2 template; `documents` comes from the converter, `query` from the caller.
      template: |
        "According to the contents of this website:
        {% for document in documents %}
        {{document.content}}
        {% endfor %}
        Answer the given question: {{query}}
        Answer:
        "
    type: haystack.components.builders.prompt_builder.PromptBuilder

connections:
- receiver: converter.sources
  sender: fetcher.streams
- receiver: prompt.documents
  sender: converter.documents
- receiver: llm.prompt
  sender: prompt.prompt

metadata: {}
32 changes: 32 additions & 0 deletions examples/chat_with_website_streaming/pipeline_wrapper.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
from pathlib import Path
from typing import Generator, List, Union
from haystack import Pipeline
from hayhooks.server.pipelines.utils import get_last_user_message, streaming_generator
from hayhooks.server.utils.base_pipeline_wrapper import BasePipelineWrapper
from hayhooks.server.logger import log


# Default websites queried by run_chat (run_api takes its URLs from the request instead).
URLS = ["https://haystack.deepset.ai", "https://www.redis.io", "https://ssi.inc"]


class PipelineWrapper(BasePipelineWrapper):
    """Hayhooks wrapper for the chat-with-website pipeline, streaming chat variant."""

    def setup(self) -> None:
        """Load and deserialize the pipeline YAML that lives next to this module."""
        yaml_source = (Path(__file__).parent / "chat_with_website.yml").read_text()
        self.pipeline = Pipeline.loads(yaml_source)

    def run_api(self, urls: List[str], question: str) -> str:
        """Answer `question` using the contents fetched from the caller-supplied `urls`."""
        log.trace(f"Running pipeline with urls: {urls} and question: {question}")
        run_args = {"fetcher": {"urls": urls}, "prompt": {"query": question}}
        output = self.pipeline.run(run_args)
        # The generator yields a list of replies; the example returns the first one.
        return output["llm"]["replies"][0]

    def run_chat(self, model: str, messages: List[dict], body: dict) -> Union[str, Generator]:
        """Stream an answer to the last user message, fetching content from the fixed URLS."""
        log.trace(f"Running pipeline with model: {model}, messages: {messages}, body: {body}")

        question = get_last_user_message(messages)
        log.trace(f"Question: {question}")

        # Streaming pipeline run, will return a generator
        run_args = {"fetcher": {"urls": URLS}, "prompt": {"query": question}}
        return streaming_generator(pipeline=self.pipeline, pipeline_run_args=run_args)
10 changes: 0 additions & 10 deletions tests/test_files/files/chat_with_website/pipeline_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,16 +25,6 @@ def run_chat(self, model: str, messages: List[dict], body: dict) -> Union[str, G
question = get_last_user_message(messages)
log.trace(f"Question: {question}")

# Plain pipeline run, will return a string
# result = self.pipeline.run({"fetcher": {"urls": URLS}, "prompt": {"query": question}})
# return result["llm"]["replies"][0]

# Streaming pipeline run, will return a generator
# def pipeline_runner():
# self.pipeline.run({"fetcher": {"urls": URLS}, "prompt": {"query": question}})

# return streaming_generator(self.pipeline, pipeline_runner)

# Mock streaming pipeline run, will return a fixed string
# NOTE: This is used in tests, please don't change it
return "This is a mock response from the pipeline"
Original file line number Diff line number Diff line change
Expand Up @@ -26,12 +26,6 @@ def run_chat(self, model: str, messages: List[dict], body: dict) -> Union[str, G
question = get_last_user_message(messages)
log.trace(f"Question: {question}")

# Real pipeline run with streaming
# return streaming_generator(
# pipeline=self.pipeline,
# pipeline_run_args={"fetcher": {"urls": URLS}, "prompt": {"query": question}},
# )

# Mock streaming pipeline run, will return a fixed string
# NOTE: This is used in tests, please don't change it
if "Redis" in question:
Expand Down

0 comments on commit 990cb35

Please sign in to comment.