Skip to content

Commit b57bf7b

Browse files
committed
Add ALTK JSON Processing native plugin
Signed-off-by: Jason Tsay <[email protected]>
1 parent fa92384 commit b57bf7b

File tree

7 files changed

+2135
-105
lines changed

7 files changed

+2135
-105
lines changed
Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
# ALTKJsonProcessor for Context Forge MCP Gateway
2+
3+
> Author: Jason Tsay
4+
> Version: 0.1.0
5+
6+
Uses JSON Processor from ALTK to extract data from long JSON responses. See the [ALTK](https://altk.ai/) and the [JSON Processor component in the ALTK repo](https://github.com/AgentToolkit/agent-lifecycle-toolkit/tree/main/altk/post_tool/code_generation) for more details on how the component works.
7+
8+
## Hooks
9+
- `tool_post_invoke` - Detects long JSON responses and processes as necessary
10+
11+
## Installation
12+
13+
1. Copy `.env.example` to `.env`
14+
2. Enable plugins in `.env`
15+
3. Enable the "ALTKJsonProcessor" plugin in `plugins/config.yaml`.
16+
4. Install the optional dependency `altk` (i.e. `pip install mcp-context-forge[altk]`)
17+
18+
## Configuration
19+
20+
```yaml
21+
- name: "ALTKJsonProcessor"
22+
kind: "plugins.altk_json_processor.json_processor.ALTKJsonProcessor"
23+
description: "Uses JSON Processor from ALTK to extract data from long JSON responses"
24+
hooks: ["tool_post_invoke"]
25+
tags: ["plugin"]
26+
mode: "enforce"
27+
priority: 150
28+
conditions: []
29+
config:
30+
jsonprocessor_query: ""
31+
llm_provider: "watsonx" # one of watsonx, ollama, openai, anthropic
32+
watsonx:
33+
wx_api_key: "" # optional, can define WX_API_KEY instead
34+
wx_project_id: "" # optional, can define WX_PROJECT_ID instead
35+
wx_url: "https://us-south.ml.cloud.ibm.com"
36+
ollama:
37+
ollama_url: "http://localhost:11434"
38+
openai:
39+
api_key: "" # optional, can define OPENAI_API_KEY instead
40+
anthropic:
41+
api_key: "" # optional, can define ANTHROPIC_API_KEY instead
42+
length_threshold: 100000
43+
model_id: "ibm/granite-3-3-8b-instruct" # note that this changes depending on provider
44+
```
45+
46+
- `length_threshold` is the minimum number of characters before activating this component
47+
- `jsonprocessor_query` is a natural language statement of what the long response should be processed for. For an example of a long response for a musical artist: "get full metadata for all albums from the artist's discography in json format"
Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
# -*- coding: utf-8 -*-
2+
"""MCP Gateway ALTKJsonProcessor Plugin - Uses JSON Processor from ALTK to extract data from long JSON responses.
3+
4+
Copyright 2025
5+
SPDX-License-Identifier: Apache-2.0
6+
Authors: Jason Tsay
7+
8+
"""
Lines changed: 131 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,131 @@
1+
# -*- coding: utf-8 -*-
2+
"""Uses JSON Processor from ALTK to extract data from long JSON responses.
3+
4+
Copyright 2025
5+
SPDX-License-Identifier: Apache-2.0
6+
Authors: Jason Tsay
7+
8+
This module loads configurations for plugins.
9+
"""
10+
11+
# Standard
12+
import json
13+
import os
14+
from typing import cast
15+
16+
# Third-Party
17+
from altk.core.llm import get_llm
18+
19+
# Third-party
20+
from altk.core.toolkit import AgentPhase
21+
from altk.post_tool.code_generation.code_generation import CodeGenerationComponent, CodeGenerationComponentConfig
22+
from altk.post_tool.core.toolkit import CodeGenerationRunInput, CodeGenerationRunOutput
23+
24+
# First-Party
25+
from mcpgateway.plugins.framework import (
26+
Plugin,
27+
PluginConfig,
28+
PluginContext,
29+
ToolPostInvokePayload,
30+
ToolPostInvokeResult,
31+
)
32+
from mcpgateway.services.logging_service import LoggingService
33+
34+
# Initialize logging service first
35+
logging_service = LoggingService()
36+
logger = logging_service.get_logger(__name__)
37+
38+
39+
class ALTKJsonProcessor(Plugin):
    """Uses JSON Processor from ALTK to extract data from long JSON responses.

    When a tool returns a text content item whose payload is valid JSON and
    longer than the configured ``length_threshold``, the response is handed to
    ALTK's code-generation JSON processor, which extracts the data described by
    the ``jsonprocessor_query`` config entry and replaces the tool result text
    with the processed output.
    """

    def __init__(self, config: PluginConfig):
        """Entry init block for plugin.

        Args:
            config: the plugin configuration
        """
        super().__init__(config)
        # Keep an empty dict when no plugin config was supplied so lookups
        # below raise KeyError rather than failing on a None access.
        self._cfg = config.config if config.config else {}

    def _build_llm_client(self):
        """Build the ALTK LLM client for the configured ``llm_provider``.

        Credentials (and the watsonx project id) are taken from the plugin
        config when non-empty, otherwise from the corresponding environment
        variable (WX_API_KEY / WX_PROJECT_ID / OPENAI_API_KEY /
        ANTHROPIC_API_KEY).

        Raises:
            ValueError: if an unknown provider is given for 'llm_provider'
            KeyError: if a required credential is neither configured nor set
                in the environment

        Returns:
            An instantiated ALTK LLM client for the configured provider.
        """
        provider = self._cfg["llm_provider"]
        model_id = self._cfg["model_id"]
        if provider == "watsonx":
            watsonx_client = get_llm("watsonx")
            # Prefer configured credentials; fall back to env vars and let
            # os.environ raise KeyError when they are absent.
            api_key = self._cfg["watsonx"]["wx_api_key"] or os.environ["WX_API_KEY"]
            project_id = self._cfg["watsonx"]["wx_project_id"] or os.environ["WX_PROJECT_ID"]
            return watsonx_client(model_id=model_id, api_key=api_key, project_id=project_id, url=self._cfg["watsonx"]["wx_url"])
        if provider == "openai":
            openai_client = get_llm("openai.sync")
            api_key = self._cfg["openai"]["api_key"] or os.environ["OPENAI_API_KEY"]
            return openai_client(api_key=api_key, model=model_id)
        if provider == "ollama":
            # Ollama is local: no API key required, only the server URL.
            ollama_client = get_llm("litellm.ollama")
            return ollama_client(api_url=self._cfg["ollama"]["ollama_url"], model_name=model_id)
        if provider == "anthropic":
            anthropic_client = get_llm("litellm")
            api_key = self._cfg["anthropic"]["api_key"] or os.environ["ANTHROPIC_API_KEY"]
            # litellm routes by a provider-prefixed model path.
            return anthropic_client(model_name=f"anthropic/{model_id}", api_key=api_key)
        raise ValueError("Unknown provider given for 'llm_provider' in plugin config!")

    async def tool_post_invoke(self, payload: ToolPostInvokePayload, context: PluginContext) -> ToolPostInvokeResult:
        """Plugin hook run after a tool is invoked.

        Args:
            payload: The tool result payload to be analyzed.
            context: Contextual information about the hook call.

        Raises:
            ValueError: if a provider api key is not provided in either config or env var

        Returns:
            The result of the plugin's analysis, including whether the tool result should proceed.
        """
        # Pull out the first text content item, if any, and try to parse it as JSON.
        response_json = None
        response_str = None
        if "content" in payload.result:
            if len(payload.result["content"]) > 0:
                content = payload.result["content"][0]
                if "type" in content and content["type"] == "text":
                    response_str = content["text"]
                    try:
                        response_json = json.loads(response_str)
                    except json.decoder.JSONDecodeError:
                        # ignore anything that's not json
                        pass

        if response_json and response_str and len(response_str) > self._cfg["length_threshold"]:
            logger.info("Long JSON response detected, using ALTK JSON Processor...")
            # Build the LLM client lazily: only long JSON responses need it, so
            # short or non-JSON results don't pay for client construction or
            # require provider credentials on every tool invocation.
            llm_client = self._build_llm_client()
            config = CodeGenerationComponentConfig(llm_client=llm_client, use_docker_sandbox=False)
            codegen = CodeGenerationComponent(config=config)
            nl_query = self._cfg["jsonprocessor_query"]
            input_data = CodeGenerationRunInput(messages=[], nl_query=nl_query, tool_response=response_json)
            output = codegen.process(input_data, AgentPhase.RUNTIME)
            output = cast(CodeGenerationRunOutput, output)
            # Replace the original (long) tool text with the extracted result.
            payload.result["content"][0]["text"] = output.result
            logger.debug(f"ALTK processed response: {output.result}")
            return ToolPostInvokeResult(continue_processing=True, modified_payload=payload)

        return ToolPostInvokeResult(continue_processing=True)
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
description: "Uses JSON Processor from ALTK to extract data from long JSON responses"
author: "Jason Tsay"
version: "0.1.0"
available_hooks:
  # Hook name must match the plugin's implemented hook (tool_post_invoke);
  # "tool_post_hook" was inconsistent with the code, README, and config.yaml.
  - "tool_post_invoke"
default_configs:

plugins/config.yaml

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -869,3 +869,33 @@ plugins:
869869
enable_caching: true
870870
cache_ttl: 3600
871871
max_text_length: 10000
872+
873+
# ALTK: JSON Processor
874+
- name: "ALTKJsonProcessor"
875+
kind: "plugins.altk_json_processor.json_processor.ALTKJsonProcessor"
876+
description: "Uses JSON Processor from ALTK to extract data from long JSON responses"
877+
version: "0.1.0"
878+
author: "Jason Tsay"
879+
hooks: ["tool_post_invoke"]
880+
tags: ["plugin"]
881+
mode: "disabled" # enforce | permissive | disabled
882+
priority: 150
883+
conditions:
884+
# Apply to specific tools/servers
885+
- server_ids: [] # Apply to all servers
886+
tenant_ids: [] # Apply to all tenants
887+
config:
888+
jsonprocessor_query: ""
889+
llm_provider: "watsonx" # one of watsonx, ollama, openai, anthropic
890+
watsonx:
891+
wx_api_key: "" # optional, can define WX_API_KEY instead
892+
wx_project_id: "" # optional, can define WX_PROJECT_ID instead
893+
wx_url: "https://us-south.ml.cloud.ibm.com"
894+
ollama:
895+
ollama_url: "http://localhost:11434"
896+
openai:
897+
api_key: "" # optional, can define OPENAI_API_KEY instead
898+
anthropic:
899+
api_key: "" # optional, can define ANTHROPIC_API_KEY instead
900+
model_id: "ibm/granite-3-3-8b-instruct" # note that this changes depending on provider
901+
length_threshold: 100000

pyproject.toml

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -116,7 +116,7 @@ dev = [
116116
"pydocstyle>=6.3.0",
117117
"pylint>=3.3.9",
118118
"pylint-pydantic>=0.3.5",
119-
"pyre-check>=0.9.25",
119+
#"pyre-check>=0.9.25", # incompatible with altk, superseded by pyrefly?
120120
"pyrefly>=0.35.0",
121121
"pyright>=1.1.406",
122122
"pyroma>=5.0",
@@ -213,6 +213,11 @@ asyncpg = [
213213
"asyncpg>=0.30.0",
214214
]
215215

216+
# Agent Lifecycle Toolkit(optional)
217+
altk = [
218+
"agent-lifecycle-toolkit>=0.4.0",
219+
]
220+
216221
# gRPC Support (EXPERIMENTAL - optional, disabled by default)
217222
# Install with: pip install mcp-contextforge-gateway[grpc]
218223
grpc = [

0 commit comments

Comments
 (0)