Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
# Date MCP Server + Client

A minimal Model Context Protocol (MCP) server that exposes the tools `current_date` and `current_time` (returning today's date and time in ISO format) plus an optional `ask_llm` tool, and a matching client that lists the tools and invokes them.

## Files
- `date_server.py`: MCP server exposing `current_date`, `current_time`, and `ask_llm`
- `date_client.py`: Client helpers using stdio to connect via `uv`
- `main.ipynb`: Walkthrough notebook to try it end-to-end

## Prereqs
- `uv` available on your PATH
- Set up a Conda environment like this:
conda create -n mcpdev python=3.11 -y
conda activate mcpdev

# MUST install MCP first
pip install mcp

# Install correct OpenAI Agent SDK
pip install openai-agents

# Optional development utilities
pip install python-dotenv ipython jupyterlab
pip install rich typer black isort


## Execution

The repository includes a minimal MCP server (`date_server.py`) that also exposes an
`ask_llm` tool powered by OpenAI, and a simple stdio-based client (`date_client.py`).

1) Configure an OpenAI API key (optional but required for `ask_llm`):

Create a `.env` file in this folder or export the env var. Example `.env`:

OPENAI_API_KEY=sk-...
OPENAI_MODEL=gpt-4o-mini # optional; defaults to gpt-4o-mini

If `OPENAI_API_KEY` is not set the server will still run, but `ask_llm` will return
an informative message indicating the LLM is not configured.

2) Install requirements and start the notebook or run the server/client manually.

Open the `main.ipynb` notebook and run the cells for an end-to-end walkthrough, or
run the server and client manually:

Run the MCP server (stdio transport expected by the client):

python date_server.py

In another terminal, you can run the client (`date_client.py` is an async helper —
the notebook demonstrates usage). The client will list tools and can call
`current_date`, `current_time`, and `ask_llm` (if configured).

Notes:
- The server uses `OPENAI_MODEL` (env) to control which model to call.
- Error handling has been added for the LLM call so runtime errors are returned as
readable messages to calling clients.



Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
import mcp
from mcp.client.stdio import stdio_client
from mcp import StdioServerParameters


# Server launch spec: spawn `date_server.py` via `uv run` over stdio,
# inheriting the parent process environment (env=None).
params = StdioServerParameters(command="uv", args=["run", "date_server.py"], env=None)


async def list_date_tools():
    """Return the tools advertised by the date MCP server.

    Spawns the server subprocess over stdio, performs the MCP
    initialization handshake, then requests the tool catalogue.
    """
    async with stdio_client(params) as (read_stream, write_stream):
        async with mcp.ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            listing = await session.list_tools()
            return listing.tools


async def call_date_tool(tool_name, tool_args=None):
    """Invoke *tool_name* on the date MCP server and return its result.

    ``tool_args`` defaults to an empty mapping. If the returned object
    carries the payload under a ``result`` attribute or ``"result"``
    key, the inner value is returned; otherwise the raw call result is
    passed through unchanged.
    """
    async with stdio_client(params) as (read_stream, write_stream):
        async with mcp.ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            raw = await session.call_tool(tool_name, tool_args or {})
            # Best-effort unwrap of common response shapes; never let the
            # introspection itself mask the call result.
            try:
                if hasattr(raw, "result"):
                    return raw.result
                if isinstance(raw, dict) and "result" in raw:
                    return raw["result"]
            except Exception:
                pass
            return raw

async def ask_llm(prompt: str):
    """Forward *prompt* to the server-side ``ask_llm`` tool and return its reply."""
    payload = {"prompt": prompt}
    return await call_date_tool("ask_llm", payload)

async def chat_mode():
    """Run an interactive console loop that relays user input to ``ask_llm``.

    Reads lines from stdin until the user types 'exit' or 'quit', sending
    each message to the server's LLM tool and printing the response.
    """
    print("\n🤖 AI Chat Mode — type 'exit' to quit.\n")
    while True:
        user_text = input("You: ")
        if user_text.lower() in ("exit", "quit"):
            break
        reply = await ask_llm(user_text)
        # Servers normally return text; fall back to stringification for
        # anything else, and to repr() if even str() fails.
        try:
            rendered = reply if isinstance(reply, str) else str(reply)
        except Exception:
            rendered = repr(reply)
        print("\nAI:\n", rendered, "\n")
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
import asyncio
import os
from datetime import date, datetime, timezone

from dotenv import load_dotenv
from openai import OpenAI

from mcp.server.fastmcp import FastMCP

# MCP server instance; the @mcp.tool() functions below register against it.
mcp = FastMCP("date_server")

# Load environment variables (allows using a .env file)
load_dotenv(override=True)

# Create OpenAI client only if API key is present. This avoids hard failures
# when running the server in environments without LLM configured.
_OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
_DEFAULT_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
client = OpenAI(api_key=_OPENAI_API_KEY) if _OPENAI_API_KEY else None


@mcp.tool()
async def current_date() -> str:
    """Return today's date as an ISO-8601 string (YYYY-MM-DD)."""
    today = date.today()
    return today.isoformat()


@mcp.tool()
async def current_time() -> str:
    """Return the current UTC time formatted as HH:MM:SS.

    Uses a timezone-aware datetime: ``datetime.utcnow()`` is deprecated
    since Python 3.12 and returns a naive object, whereas
    ``datetime.now(timezone.utc)`` yields the same wall-clock value with
    an explicit UTC offset attached.
    """
    return datetime.now(timezone.utc).strftime("%H:%M:%S")


@mcp.tool()
async def ask_llm(prompt: str) -> str:
    """Answer *prompt* with an OpenAI chat completion.

    Behavior:
    - Returns a friendly notice when no API key is configured.
    - Offloads the synchronous OpenAI SDK call to a worker thread so the
      event loop is not blocked.
    - Converts any runtime failure into a readable error string instead
      of raising, so MCP clients can surface it to users.
    """
    if not client:
        return "LLM not configured. Please set OPENAI_API_KEY in the environment."

    # Re-read the env var on each call so the model can change at runtime.
    model = os.getenv("OPENAI_MODEL", _DEFAULT_MODEL)

    try:
        response = await asyncio.to_thread(
            lambda: client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": prompt}],
            )
        )

        # Pull the first choice's message content, tolerating both
        # attribute-style objects and plain dicts.
        content = None
        choices = getattr(response, "choices", None)
        if choices:
            first = choices[0]
            message = getattr(first, "message", None)
            if message is None and isinstance(first, dict):
                message = first.get("message")
            if message is not None:
                if isinstance(message, dict):
                    content = message.get("content")
                else:
                    content = getattr(message, "content", None)

        return content or str(response)

    except Exception as exc:  # pragma: no cover - runtime error path
        # Surface the failure as text; MCP clients can show this to users.
        return f"LLM error: {exc}"


if __name__ == "__main__":
    # Serve over stdio so clients can spawn this script as a subprocess.
    mcp.run(transport="stdio")
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Date MCP Server + Client Tutorial\n",
"\n",
"This notebook demonstrates a minimal MCP server exposing `current_date` and a matching client that calls it.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os, shutil\n",
"print('CWD:', os.getcwd())\n",
"print('uv path:', shutil.which('uv'))\n",
"\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Show server and client code\n",
"from pathlib import Path\n",
"print('--- date_server.py ---')\n",
"print(Path('date_server.py').read_text())\n",
"print('\\n--- date_client.py ---')\n",
"print(Path('date_client.py').read_text())\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# List tools\n",
"from date_client import list_date_tools\n",
"\n",
"[t.name for t in (await list_date_tools())]\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from date_client import call_date_tool\n",
"\n",
"res = await call_date_tool(\"current_time\")\n",
"res"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"### Test the ask_llm tool through the MCP client\n",
"from date_client import ask_llm\n",
"\n",
"response = await ask_llm(\"Explain DevOps in simple words.\")\n",
"print(\"🤖 AI Response:\\n\", response)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
import asyncio
from mcp.client.stdio import stdio_client, StdioServerParameters
from mcp.client.session import ClientSession

# IMPORTANT: variable name MUST be PARAMS (uppercase)
# Launch spec: spawn `emoji_server.py` via `uv run` over stdio, inheriting
# the parent process environment (env=None).
PARAMS = StdioServerParameters(
    command="uv",
    args=["run", "emoji_server.py"],
    env=None
)

# -------------------- List Tools --------------------
async def list_emoji_tools():
    """Return the tool catalogue advertised by the emoji MCP server."""
    async with stdio_client(PARAMS) as streams:
        async with ClientSession(*streams) as session:
            await session.initialize()
            listing = await session.list_tools()
            return listing.tools

# -------------------- Call Tool --------------------
async def call_emoji_tool(tool_name: str, arguments: dict = None):
    """Invoke *tool_name* on the emoji server and return the raw MCP result.

    ``arguments`` defaults to an empty mapping when omitted.
    """
    payload = arguments or {}
    async with stdio_client(PARAMS) as streams:
        async with ClientSession(*streams) as session:
            await session.initialize()
            return await session.call_tool(name=tool_name, arguments=payload)
Loading