Skip to content

Commit b17f71d

Browse files
authored
chore: update cpu and gpu type generators (#245)
1 parent b8733c6 commit b17f71d

File tree

6 files changed

+370
-428
lines changed

6 files changed

+370
-428
lines changed

.gitignore

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,10 +19,10 @@ npm-debug.log*
1919
yarn-debug.log*
2020
yarn-error.log*
2121

22-
2322
.vercel
2423
.env*.local
2524

25+
.venv
2626

2727
helpers/__pycache__/**/*
2828

docs/references/cpu-types.md

Lines changed: 264 additions & 257 deletions
Large diffs are not rendered by default.

docs/references/gpu-types.md

Lines changed: 41 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -7,46 +7,45 @@ The following list contains all GPU types available on RunPod.
77
For more information, see [GPU pricing](https://www.runpod.io/gpu-instance/pricing).
88

99
<!--
10-
Table last generated: 2025-04-02
10+
Table last generated: 2025-04-29
1111
-->
12-
13-
| GPU ID | Display Name | Memory (GB) |
14-
| ---------------------------------- | ---------------- | ----------- |
15-
| NVIDIA A100 80GB PCIe | A100 PCIe | 80 |
16-
| NVIDIA A100-SXM4-80GB | A100 SXM | 80 |
17-
| NVIDIA A30 | A30 | 24 |
18-
| NVIDIA A40 | A40 | 48 |
19-
| NVIDIA B200 | B200 | 180 |
20-
| NVIDIA H100 NVL | H100 NVL | 94 |
21-
| NVIDIA H100 PCIe | H100 PCIe | 80 |
22-
| NVIDIA H100 80GB HBM3 | H100 SXM | 80 |
23-
| NVIDIA H200 | H200 SXM | 141 |
24-
| NVIDIA L4 | L4 | 24 |
25-
| NVIDIA L40 | L40 | 48 |
26-
| NVIDIA L40S | L40S | 48 |
27-
| AMD Instinct MI300X OAM | MI300X | 192 |
28-
| NVIDIA RTX 2000 Ada Generation | RTX 2000 Ada | 16 |
29-
| NVIDIA GeForce RTX 3070 | RTX 3070 | 8 |
30-
| NVIDIA GeForce RTX 3080 | RTX 3080 | 10 |
31-
| NVIDIA GeForce RTX 3080 Ti | RTX 3080 Ti | 12 |
32-
| NVIDIA GeForce RTX 3090 | RTX 3090 | 24 |
33-
| NVIDIA GeForce RTX 3090 Ti | RTX 3090 Ti | 24 |
34-
| NVIDIA RTX 4000 Ada Generation | RTX 4000 Ada | 20 |
35-
| NVIDIA RTX 4000 SFF Ada Generation | RTX 4000 Ada SFF | 20 |
36-
| NVIDIA GeForce RTX 4070 Ti | RTX 4070 Ti | 12 |
37-
| NVIDIA GeForce RTX 4080 | RTX 4080 | 16 |
38-
| NVIDIA GeForce RTX 4080 SUPER | RTX 4080 SUPER | 16 |
39-
| NVIDIA GeForce RTX 4090 | RTX 4090 | 24 |
40-
| NVIDIA RTX 5000 Ada Generation | RTX 5000 Ada | 32 |
41-
| NVIDIA GeForce RTX 5080 | RTX 5080 | 16 |
42-
| NVIDIA GeForce RTX 5090 | RTX 5090 | 32 |
43-
| NVIDIA RTX 6000 Ada Generation | RTX 6000 Ada | 48 |
44-
| NVIDIA RTX A2000 | RTX A2000 | 6 |
45-
| NVIDIA RTX A4000 | RTX A4000 | 16 |
46-
| NVIDIA RTX A4500 | RTX A4500 | 20 |
47-
| NVIDIA RTX A5000 | RTX A5000 | 24 |
48-
| NVIDIA RTX A6000 | RTX A6000 | 48 |
49-
| Tesla V100-PCIE-16GB | Tesla V100 | 16 |
50-
| Tesla V100-FHHL-16GB | V100 FHHL | 16 |
51-
| Tesla V100-SXM2-16GB | V100 SXM2 | 16 |
52-
| Tesla V100-SXM2-32GB | V100 SXM2 32GB | 32 |
12+
| GPU ID | Display Name | Memory (GB) |
13+
|------------------------------------|------------------|---------------|
14+
| AMD Instinct MI300X OAM | MI300X | 192 |
15+
| NVIDIA A100 80GB PCIe | A100 PCIe | 80 |
16+
| NVIDIA A100-SXM4-80GB | A100 SXM | 80 |
17+
| NVIDIA A30 | A30 | 24 |
18+
| NVIDIA A40 | A40 | 48 |
19+
| NVIDIA B200 | B200 | 180 |
20+
| NVIDIA GeForce RTX 3070 | RTX 3070 | 8 |
21+
| NVIDIA GeForce RTX 3080 | RTX 3080 | 10 |
22+
| NVIDIA GeForce RTX 3080 Ti | RTX 3080 Ti | 12 |
23+
| NVIDIA GeForce RTX 3090 | RTX 3090 | 24 |
24+
| NVIDIA GeForce RTX 3090 Ti | RTX 3090 Ti | 24 |
25+
| NVIDIA GeForce RTX 4070 Ti | RTX 4070 Ti | 12 |
26+
| NVIDIA GeForce RTX 4080 | RTX 4080 | 16 |
27+
| NVIDIA GeForce RTX 4080 SUPER | RTX 4080 SUPER | 16 |
28+
| NVIDIA GeForce RTX 4090 | RTX 4090 | 24 |
29+
| NVIDIA GeForce RTX 5080 | RTX 5080 | 16 |
30+
| NVIDIA GeForce RTX 5090 | RTX 5090 | 32 |
31+
| NVIDIA H100 80GB HBM3 | H100 SXM | 80 |
32+
| NVIDIA H100 NVL | H100 NVL | 94 |
33+
| NVIDIA H100 PCIe | H100 PCIe | 80 |
34+
| NVIDIA H200 | H200 SXM | 141 |
35+
| NVIDIA L4 | L4 | 24 |
36+
| NVIDIA L40 | L40 | 48 |
37+
| NVIDIA L40S | L40S | 48 |
38+
| NVIDIA RTX 2000 Ada Generation | RTX 2000 Ada | 16 |
39+
| NVIDIA RTX 4000 Ada Generation | RTX 4000 Ada | 20 |
40+
| NVIDIA RTX 4000 SFF Ada Generation | RTX 4000 Ada SFF | 20 |
41+
| NVIDIA RTX 5000 Ada Generation | RTX 5000 Ada | 32 |
42+
| NVIDIA RTX 6000 Ada Generation | RTX 6000 Ada | 48 |
43+
| NVIDIA RTX A2000 | RTX A2000 | 6 |
44+
| NVIDIA RTX A4000 | RTX A4000 | 16 |
45+
| NVIDIA RTX A4500 | RTX A4500 | 20 |
46+
| NVIDIA RTX A5000 | RTX A5000 | 24 |
47+
| NVIDIA RTX A6000 | RTX A6000 | 48 |
48+
| Tesla V100-FHHL-16GB | V100 FHHL | 16 |
49+
| Tesla V100-PCIE-16GB | Tesla V100 | 16 |
50+
| Tesla V100-SXM2-16GB | V100 SXM2 | 16 |
51+
| Tesla V100-SXM2-32GB | V100 SXM2 32GB | 32 |

helpers/gpu_types.py

Lines changed: 29 additions & 46 deletions
Original file line numberDiff line numberDiff line change
"""Regenerate docs/references/gpu-types.md from the RunPod GraphQL API.

Fetches all GPU types (id, display name, memory), filters out the
"unknown" placeholder entry, sorts alphabetically by display name, and
rewrites the markdown reference page with a GitHub-flavored table.
"""

import os
from datetime import datetime

import pandas as pd
import requests
from tabulate import tabulate

# Public, unauthenticated GraphQL query for the GPU-type catalog.
response = requests.post(
    "https://api.runpod.io/graphql",
    headers={"content-type": "application/json"},
    json={"query": "query GpuTypes { gpuTypes { id displayName memoryInGb } }"},
)

response.raise_for_status()

gpu_data = response.json()
gpus = gpu_data["data"]["gpuTypes"]

gpus_df = pd.DataFrame(gpus)

# Drop the API's "unknown" placeholder row; copy() so later assignments
# operate on an owned frame rather than a view of the filter result.
gpus_df = gpus_df[gpus_df["id"].str.lower() != "unknown"].copy()

# BUG FIX: sort_values() returns a new DataFrame, and chaining
# reset_index(inplace=True) onto it mutated only that temporary —
# gpus_df itself was left unsorted. Assign the result back instead.
gpus_df = gpus_df.sort_values(by="displayName").reset_index(drop=True)

# Output path is resolved relative to this helper script's location.
file_path = os.path.join(
    os.path.dirname(__file__), "../docs/references/gpu-types.md"
)

table = tabulate(
    gpus_df,
    headers=["GPU ID", "Display Name", "Memory (GB)"],
    tablefmt="github",
    showindex=False,
)

with open(file_path, "w") as file:
    # Stamp the generation date so readers can tell how fresh the table is.
    date = datetime.now().strftime("%Y-%m-%d")
    file.write(
        f"""---
title: GPU types
---

The following list contains all GPU types available on RunPod.

For more information, see [GPU pricing](https://www.runpod.io/gpu-instance/pricing).

<!--
Table last generated: {date}
-->

{table}
""")

print("Markdown file with GPU data created successfully.")

helpers/requirements.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
requests
2+
tabulate
3+
pandas

helpers/sls_cpu_types.py

Lines changed: 32 additions & 82 deletions
Original file line numberDiff line numberDiff line change
"""Regenerate docs/references/cpu-types.md from the RunPod GraphQL API.

Fetches all serverless CPU types (display name, cores, threads per core),
filters out unknown/incomplete rows, normalizes display names, sorts
alphabetically, and rewrites the markdown reference page.
"""

import os
from datetime import datetime

import pandas as pd
import requests
from tabulate import tabulate

# Public, unauthenticated GraphQL query for the CPU-type catalog.
response = requests.post(
    "https://api.runpod.io/graphql",
    headers={"content-type": "application/json"},
    json={
        "query": "query CpuTypes { cpuTypes { displayName cores threadsPerCore } }"
    },
)

response.raise_for_status()

cpu_data = response.json()
cpus = cpu_data["data"]["cpuTypes"]

cpus_df = pd.DataFrame(cpus)

# Keep only named rows with both core and thread counts present; copy()
# so the cleanup assignments below operate on an owned frame, not a view.
cpus_df = cpus_df[
    (cpus_df["displayName"].str.lower() != "unknown")
    & (~cpus_df["cores"].isna())
    & (~cpus_df["threadsPerCore"].isna())
].copy()

# BUG FIX: these three cleanup steps previously discarded their results.
# str.replace/str.strip and dropna return new objects, and
# sort_values(...).reset_index(inplace=True) mutated only a temporary
# copy — none of them actually changed cpus_df. Assign each result back.
cpus_df["displayName"] = (
    cpus_df["displayName"].str.replace(r"\s{2,}", " ", regex=True).str.strip()
)
cpus_df = cpus_df.dropna(how="all")
cpus_df = cpus_df.sort_values(by="displayName").reset_index(drop=True)

# Output path is resolved relative to this helper script's location.
file_path = os.path.join(
    os.path.dirname(__file__), "../docs/references/cpu-types.md"
)

table = tabulate(
    cpus_df,
    headers=["Display Name", "Cores", "Threads Per Core"],
    tablefmt="github",
    showindex=False,
)

# NOTE(review): the diff omitted 2-3 context lines of this page preamble
# (between the front matter and the generation-date comment) — the
# description sentence below is reconstructed by analogy with the GPU
# page; verify against the committed cpu-types.md before shipping.
with open(file_path, "w") as file:
    # Stamp the generation date so readers can tell how fresh the table is.
    date = datetime.now().strftime("%Y-%m-%d")
    file.write(
        f"""---
title: Serverless CPU types
---

The following list contains all CPU types available on RunPod.

<!--
Table last generated: {date}
-->
{table}
""")

print("Markdown file with CPU data updated successfully.")

0 commit comments

Comments
 (0)