
Commit 177ff09

Add Kaggle Dataset Visualization With LLM-Generated Code Using Ollama And Judge0
1 parent a0f21c4 commit 177ff09

File tree

1 file changed (+104 −2)

README.md

Lines changed: 104 additions & 2 deletions
@@ -233,7 +233,7 @@ print(client.get_languages())

### Running LLM-Generated Code

-#### Simple Example with Ollama
+#### Simple Example With Ollama

```python
# pip install judge0 ollama
@@ -270,7 +270,7 @@ result = judge0.run(source_code=code, language=judge0.C)
print(f"CODE EXECUTION RESULT:\n{result.stdout}")
```

-#### Tool Calling (a.k.a. Function Calling) with Ollama
+#### Tool Calling (a.k.a. Function Calling) With Ollama

```python
# pip install judge0 ollama
@@ -416,6 +416,108 @@ result = code_runner.initiate_chat(
print(f"Result: {result.summary}")
```

#### Kaggle Dataset Visualization With LLM-Generated Code Using Ollama And Judge0

```python
# pip install judge0 ollama requests
import os
import zipfile

import judge0
import requests
from judge0 import File, Filesystem
from ollama import Client

# Step 1: Download the dataset from Kaggle.
dataset_url = "https://www.kaggle.com/api/v1/datasets/download/gregorut/videogamesales"
dataset_zip_path = "vgsales.zip"
dataset_csv_path = "vgsales.csv"  # P.S.: We know the CSV file name inside the zip.

if not os.path.exists(dataset_csv_path):  # Download only if not already downloaded.
    with requests.get(dataset_url) as response:
        with open(dataset_zip_path, "wb") as f:
            f.write(response.content)
    with zipfile.ZipFile(dataset_zip_path, "r") as f:
        f.extractall(".")

# Step 2: Prepare the submission for Judge0.
with open(dataset_csv_path, "r") as f:
    submission = judge0.Submission(
        language=judge0.PYTHON_FOR_ML,
        additional_files=Filesystem(
            content=[
                File(name=dataset_csv_path, content=f.read()),
            ]
        ),
    )

# Step 3: Initialize Ollama Client. Get your free tier Ollama Cloud API key at https://ollama.com.
client = Client(
    host="https://ollama.com",
    headers={"Authorization": "Bearer " + os.environ.get("OLLAMA_API_KEY")},
)

# Step 4: Prepare the prompt, messages, tools, and choose the model.
prompt = f"""
I have a CSV that contains a list of video games with sales greater than 100,000 copies. It's saved in the file {dataset_csv_path}.
These are the columns:
- 'Rank': Ranking of overall sales
- 'Name': The game's name
- 'Platform': Platform of the game's release (e.g., PC, PS4, etc.)
- 'Year': Year of the game's release
- 'Genre': Genre of the game
- 'Publisher': Publisher of the game
- 'NA_Sales': Sales in North America (in millions)
- 'EU_Sales': Sales in Europe (in millions)
- 'JP_Sales': Sales in Japan (in millions)
- 'Other_Sales': Sales in the rest of the world (in millions)
- 'Global_Sales': Total worldwide sales.

I want to better understand how the sales are distributed across different genres over the years.
Write Python code that analyzes the dataset based on my request, produces the right chart, and saves it as an image file.
"""
messages = [{"role": "user", "content": prompt}]
tools = [
    {
        "type": "function",
        "function": {
            "name": "execute_python",
            "description": "Execute the Python programming language code.",
            "parameters": {
                "type": "object",
                "properties": {
                    "code": {
                        "type": "string",
                        "description": "The code written in the Python programming language.",
                    }
                },
                "required": ["code"],
            },
        },
    }
]
model = "qwen3-coder:480b-cloud"

# Step 5: Start the interaction with the model.
response = client.chat(model=model, messages=messages, tools=tools)
response_message = response["message"]

if response_message.tool_calls:
    for tool_call in response_message.tool_calls:
        if tool_call.function.name == "execute_python":
            code = tool_call.function.arguments["code"]
            print(f"CODE GENERATED BY THE MODEL:\n{code}\n")

            submission.source_code = code
            result = judge0.run(submissions=submission)

            for f in result.post_execution_filesystem:
                if f.name.endswith((".png", ".jpg", ".jpeg")):
                    with open(f.name, "wb") as img_file:
                        img_file.write(f.content)
                    print(f"Generated image saved as: {f.name}\n")
```
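
If the model replies in plain text instead of emitting a tool call, the loop above produces no output. Below is a minimal fallback sketch, assuming the same `response_message` object returned by `client.chat` in the example:

```python
# Fallback sketch: the model answered without calling the execute_python tool,
# so print its plain-text reply instead.
# (Assumes `response_message` from the example above.)
if not response_message.tool_calls:
    print(f"MODEL REPLY (no tool call):\n{response_message.content}")
```
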
### Filesystem

This example shows how to use Judge0 Python SDK to:
