Commit ea27286

feat: Introduce PyEval functionality for secure Python expression evaluation (#470)
- Added a new endpoint for evaluating Python expressions via the PyEval API.
- Implemented a dedicated page in the web interface for users to input and evaluate expressions safely.
- Integrated the RestrictedPythonEvaluator to ensure secure execution of user-provided code.
- Updated the main application routes and templates to include navigation to the new PyEval feature.
- Enhanced the evaluator with additional safe built-in functions and improved error handling.

This update significantly expands the capabilities of the application, allowing users to safely evaluate Python expressions in a controlled environment.
1 parent 310945a commit ea27286

6 files changed: +623 −2 lines changed

server/api/__init__.py

Lines changed: 3 additions & 1 deletion
@@ -67,6 +67,8 @@
     api_export_dataframe,
 )
 
+from .pyeval import pyeval_routes
+
 # Aggregate all routes into a single list
 api_routes = [
     # Knowledge management endpoints
@@ -127,4 +129,4 @@
     Route("/api/dataframes/{df_id}/summary", endpoint=api_get_dataframe_summary, methods=["GET"]),
     Route("/api/dataframes/{df_id}/execute", endpoint=api_execute_dataframe_operation, methods=["POST"]),
     Route("/api/dataframes/{df_id}/export", endpoint=api_export_dataframe, methods=["POST"]),
-]
+] + pyeval_routes

server/api/pyeval.py

Lines changed: 141 additions & 0 deletions
@@ -0,0 +1,141 @@
+"""API endpoints for Python expression evaluation using the pyeval utility."""
+
+import logging
+from typing import Any, Dict, Optional
+
+from starlette.requests import Request
+from starlette.responses import JSONResponse
+from starlette.routing import Route
+
+from utils.pyeval.evaluator import RestrictedPythonEvaluator, EvaluationError
+
+logger = logging.getLogger(__name__)
+
+
+async def evaluate_expression(request: Request) -> JSONResponse:
+    """Evaluate a Python expression using the RestrictedPythonEvaluator.
+
+    POST /api/pyeval/evaluate
+
+    Request body:
+        {
+            "expression": "python_expression_to_evaluate",
+            "context": {
+                "variable_name": "variable_value",
+                ...
+            }
+        }
+
+    Response:
+        {
+            "success": true/false,
+            "result": "evaluation_result" | null,
+            "execution_time_ms": 123.45,
+            "error_message": "error_description" | null
+        }
+    """
+    try:
+        # Parse request body
+        body = await request.json()
+        expression = body.get("expression", "").strip()
+        context = body.get("context", {})
+
+        # Validate input
+        if not expression:
+            return JSONResponse({
+                "success": False,
+                "result": None,
+                "execution_time_ms": 0.0,
+                "error_message": "Expression cannot be empty"
+            }, status_code=400)
+
+        if not isinstance(context, dict):
+            return JSONResponse({
+                "success": False,
+                "result": None,
+                "execution_time_ms": 0.0,
+                "error_message": "Context must be a dictionary"
+            }, status_code=400)
+
+        logger.info(f"Evaluating expression: {expression[:100]}{'...' if len(expression) > 100 else ''}")
+        logger.debug(f"Context variables: {list(context.keys())}")
+
+        # Create evaluator and evaluate expression
+        evaluator = RestrictedPythonEvaluator()
+        result = evaluator.evaluate_expression(expression, context)
+
+        # Format result for JSON response
+        if result.success:
+            # Convert result to string representation for JSON serialization
+            result_str = _format_result_for_json(result.result)
+            logger.info(f"Expression evaluated successfully in {result.execution_time_ms:.2f}ms")
+
+            return JSONResponse({
+                "success": True,
+                "result": result_str,
+                "execution_time_ms": result.execution_time_ms,
+                "error_message": None
+            })
+        else:
+            logger.warning(f"Expression evaluation failed: {result.error_message}")
+            return JSONResponse({
+                "success": False,
+                "result": None,
+                "execution_time_ms": result.execution_time_ms,
+                "error_message": result.error_message
+            })
+
+    except Exception as e:
+        logger.error(f"Error in evaluate_expression endpoint: {e}", exc_info=True)
+        return JSONResponse({
+            "success": False,
+            "result": None,
+            "execution_time_ms": 0.0,
+            "error_message": f"Server error: {str(e)}"
+        }, status_code=500)
+
+
+def _format_result_for_json(result: Any) -> str:
+    """Format evaluation result for JSON serialization.
+
+    Args:
+        result: The result from expression evaluation
+
+    Returns:
+        String representation of the result suitable for JSON response
+    """
+    try:
+        # Handle pandas DataFrames specially
+        if hasattr(result, 'to_string'):
+            # This covers pandas DataFrames and Series
+            return result.to_string()
+
+        # Handle numpy arrays
+        elif hasattr(result, 'tolist'):
+            return str(result)
+
+        # Handle other iterables (but not strings)
+        elif hasattr(result, '__iter__') and not isinstance(result, (str, bytes)):
+            # Convert to string representation, but limit length for very large iterables
+            str_result = str(result)
+            if len(str_result) > 10000:
+                return str_result[:10000] + "... (truncated)"
+            return str_result
+
+        # Handle basic types
+        else:
+            str_result = str(result)
+            # Limit very long string results
+            if len(str_result) > 10000:
+                return str_result[:10000] + "... (truncated)"
+            return str_result
+
+    except Exception as e:
+        logger.warning(f"Error formatting result for JSON: {e}")
+        return f"<Error formatting result: {str(e)}>"
+
+
+# API routes for pyeval functionality
+pyeval_routes = [
+    Route("/api/pyeval/evaluate", endpoint=evaluate_expression, methods=["POST"]),
+]
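
For reference, a client call against the new endpoint could look like the sketch below. The host and port are assumptions (whatever the server happens to bind to); the route, request body, and response shape come from the endpoint docstring above.

import requests  # any HTTP client works; requests is used here for brevity

resp = requests.post(
    "http://localhost:8000/api/pyeval/evaluate",  # host/port are illustrative
    json={
        "expression": "sum(x) / len(x)",
        "context": {"x": [1, 2, 3, 4]},
    },
)
print(resp.json())
# Expected shape (values illustrative):
# {"success": true, "result": "2.5", "execution_time_ms": 0.42, "error_message": null}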

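The RestrictedPythonEvaluator itself lives in utils/pyeval/evaluator.py, one of the six changed files not rendered on this page. Judging only from the call sites above, a minimal sketch of its interface might look as follows; the EvaluationResult dataclass, the builtin whitelist, and the eval-based body are assumptions, not the actual implementation (a bare eval with a builtin whitelist is not a real sandbox).

import time
from dataclasses import dataclass
from typing import Any, Dict, Optional


class EvaluationError(Exception):
    """Raised for evaluator-internal failures (assumed)."""


@dataclass
class EvaluationResult:
    success: bool
    result: Any
    execution_time_ms: float
    error_message: Optional[str]


class RestrictedPythonEvaluator:
    # Hypothetical whitelist; the commit message only says "additional safe
    # built-in functions" without naming them.
    _SAFE_BUILTINS = {"abs": abs, "len": len, "min": min, "max": max, "sum": sum}

    def evaluate_expression(self, expression: str, context: Dict[str, Any]) -> EvaluationResult:
        start = time.perf_counter()
        try:
            # Expose only the whitelist as builtins and the caller-supplied
            # context as local variables.
            value = eval(expression, {"__builtins__": self._SAFE_BUILTINS}, dict(context))
            return EvaluationResult(True, value, (time.perf_counter() - start) * 1000, None)
        except Exception as exc:
            return EvaluationResult(False, None, (time.perf_counter() - start) * 1000, str(exc))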
server/main.py

Lines changed: 6 additions & 0 deletions
@@ -391,6 +391,11 @@ async def dataframe_detail_page(request: Request):
         "dataframe_detail.html", {"request": request, "current_page": "dataframes", "df_id": df_id}
     )
 
+async def pyeval_page(request: Request):
+    return templates.TemplateResponse(
+        "pyeval.html", {"request": request, "current_page": "pyeval"}
+    )
+
 
 # --- Add routes ---
 routes = [
@@ -402,6 +407,7 @@ async def dataframe_detail_page(request: Request):
     Route("/visualizations", endpoint=visualizations_page, methods=["GET"]),
     Route("/dataframes", endpoint=dataframes_page, methods=["GET"]),
     Route("/dataframes/{df_id}", endpoint=dataframe_detail_page, methods=["GET"]),
+    Route("/pyeval", endpoint=pyeval_page, methods=["GET"]),
     Route("/config", endpoint=config_page, methods=["GET"]),
     Route("/sse", endpoint=handle_sse),
     Mount("/messages/", app=sse.handle_post_message),

server/templates/base.html

Lines changed: 1 addition & 0 deletions
@@ -370,6 +370,7 @@ <h1>MCP Knowledge Server</h1>
     <a href="/knowledge" class="nav-link {{ 'active' if current_page == 'knowledge' else '' }}">Knowledge</a>
     <a href="/jobs" class="nav-link {{ 'active' if current_page == 'jobs' else '' }}">Background Jobs</a>
     <a href="/dataframes" class="nav-link {{ 'active' if current_page == 'dataframes' else '' }}">DataFrames</a>
+    <a href="/pyeval" class="nav-link {{ 'active' if current_page == 'pyeval' else '' }}">PyEval</a>
     <a href="/tools" class="nav-link {{ 'active' if current_page == 'tools' else '' }}">Tools</a>
     <a href="/tool-history" class="nav-link {{ 'active' if current_page == 'tool_history' else '' }}">Tool
         History</a>
