|
2 | 2 | title: API
|
3 | 3 | ---
|
4 | 4 |
|
| 5 | +import Tabs from '@theme/Tabs'; |
| 6 | +import TabItem from '@theme/TabItem'; |
| 7 | + |
| 8 | +This document outlines the core functionalities provided by the RunPod API, including how to interact with endpoints, manage templates, create endpoints, and list available GPUs. |
| 9 | +These operations enable users to dynamically manage their computational resources within the RunPod environment. |
| 10 | + |
5 | 11 | ## Get Endpoints
|
6 | 12 |
|
7 |
| -To fetch all available endpoints from the API, use the get_endpoints function. |
8 |
| -This function returns a list of endpoint configurations available for use. |
| 13 | +To retrieve a comprehensive list of all available endpoint configurations within RunPod, you can use the `get_endpoints()` function. This function returns a list of endpoint configurations, allowing you to understand what's available for use in your projects. |
9 | 14 |
|
10 | 15 | ```python
|
11 | 16 | import runpod
|
12 | 17 |
|
| 18 | +# Fetching all available endpoints |
13 | 19 | endpoints = runpod.get_endpoints()
|
14 | 20 |
|
| 21 | +# Displaying the list of endpoints |
15 | 22 | print(endpoints)
|
16 | 23 | ```
|
17 | 24 |
|
18 | 25 | ## Create Template
|
19 | 26 |
|
20 |
| -You can create a new template in RunPod by specifying the name and the Docker image to use. |
21 |
| -This is useful for setting up environments with pre-defined configurations. |
| 27 | +Templates in RunPod serve as predefined configurations for setting up environments efficiently. The `create_template()` function facilitates the creation of new templates by specifying a name and a Docker image. |
22 | 28 |
|
23 | 29 | ```python
|
24 | 30 | import runpod
|
25 | 31 |
|
26 |
| - |
27 | 32 | try:
|
| 33 | + # Creating a new template with a specified name and Docker image |
| 34 | + new_template = runpod.create_template(name="test", image_name="runpod/base:0.1.0") |
28 | 35 |
|
29 |
| - new_template = runpod.create_template( |
30 |
| - name="test", |
31 |
| - image_name="runpod/base:0.1.0" |
32 |
| - ) |
33 |
| - |
| 36 | + # Output the created template details |
34 | 37 | print(new_template)
|
35 | 38 |
|
36 | 39 | except runpod.error.QueryError as err:
|
| 40 | + # Handling potential errors during template creation |
37 | 41 | print(err)
|
38 | 42 | print(err.query)
|
39 | 43 | ```
|
40 | 44 |
|
41 | 45 | ## Create Endpoint
|
42 | 46 |
|
43 |
| -Creating an endpoint involves first creating a template and then setting up the endpoint with the template ID. |
44 |
| -You can specify GPU requirements, the number of workers, and other configurations. |
45 |
| -Your Template name must be unique. |
| 47 | +Creating a new endpoint is straightforward with the `create_endpoint()` function. This function requires a `name` and a `template_id`. Additional configurations—such as GPU type and the number of Workers—can be specified to tailor the endpoint to your specific needs. |
| 48 | + |
| 49 | +<Tabs> |
| 50 | + <TabItem value="python" label="Python" default> |
46 | 51 |
|
47 | 52 | ```python
|
48 | 53 | import runpod
|
49 | 54 |
|
50 | 55 | try:
|
51 |
| - |
| 56 | + # Creating a template to use with the new endpoint |
52 | 57 | new_template = runpod.create_template(
|
53 |
| - name="test", |
54 |
| - image_name="runpod/base:0.4.4", |
55 |
| - is_serverless=True |
| 58 | + name="test", image_name="runpod/base:0.4.4", is_serverless=True |
56 | 59 | )
|
57 | 60 |
|
| 61 | + # Output the created template details |
58 | 62 | print(new_template)
|
59 | 63 |
|
| 64 | + # Creating a new endpoint using the previously created template |
60 | 65 | new_endpoint = runpod.create_endpoint(
|
61 | 66 | name="test",
|
62 | 67 | template_id=new_template["id"],
|
63 | 68 | gpu_ids="AMPERE_16",
|
64 | 69 | workers_min=0,
|
65 |
| - workers_max=1 |
| 70 | + workers_max=1, |
66 | 71 | )
|
67 | 72 |
|
| 73 | + # Output the created endpoint details |
68 | 74 | print(new_endpoint)
|
69 | 75 |
|
70 | 76 | except runpod.error.QueryError as err:
|
| 77 | + # Handling potential errors during endpoint creation |
71 | 78 | print(err)
|
72 | 79 | print(err.query)
|
73 | 80 | ```
|
74 | 81 |
|
| 82 | +</TabItem> |
| 83 | + <TabItem value="output" label="Output"> |
| 84 | + |
| 85 | +```json |
| 86 | +{ |
| 87 | + "id": "Unique_Id", |
| 88 | + "name": "YourTemplate", |
| 89 | + "imageName": "runpod/base:0.4.4", |
| 90 | + "dockerArgs": "", |
| 91 | + "containerDiskInGb": 10, |
| 92 | + "volumeInGb": 0, |
| 93 | + "volumeMountPath": "/workspace", |
| 94 | + "ports": null, |
| 95 | + "env": [], |
| 96 | + "isServerless": true |
| 97 | +} |
| 98 | +{ |
| 99 | + "id": "Unique_Id", |
| 100 | + "name": "YourTemplate", |
| 101 | + "templateId": "Unique_Id", |
| 102 | + "gpuIds": "AMPERE_16", |
| 103 | + "networkVolumeId": null, |
| 104 | + "locations": null, |
| 105 | + "idleTimeout": 5, |
| 106 | + "scalerType": "QUEUE_DELAY", |
| 107 | + "scalerValue": 4, |
| 108 | + "workersMin": 0, |
| 109 | + "workersMax": 1 |
| 110 | +} |
| 111 | +``` |
| 112 | + |
| 113 | +</TabItem> |
| 114 | +</Tabs> |
| 115 | + |
| 116 | +## Get GPUs |
| 117 | + |
| 118 | +To understand which computational resources are available, use the `get_gpus()` function to list all GPUs that can be allocated to endpoints in RunPod. This enables optimal resource selection based on your computational needs. |
| 119 | + |
| 120 | +<Tabs> |
| 121 | + <TabItem value="python" label="Python" default> |
| 122 | + |
| 123 | +```python |
| 124 | +import runpod |
| 125 | +import json |
| 126 | + |
| 127 | +# Fetching all available GPUs |
| 128 | +gpus = runpod.get_gpus() |
| 129 | + |
| 130 | +# Displaying the GPUs in a formatted manner |
| 131 | +print(json.dumps(gpus, indent=2)) |
| 132 | +``` |
| 133 | + |
| 134 | +</TabItem> |
| 135 | + <TabItem value="output" label="Output"> |
| 136 | + |
75 | 137 | ```json
|
76 |
| -{'id': 'cx829zvv9e', 'name': 'testing-01', 'imageName': 'runpod/base:0.4.4', 'dockerArgs': '', 'containerDiskInGb': 10, 'volumeInGb': 0, 'volumeMountPath': '/workspace', 'ports': None, 'env': [], 'isServerless': True} |
77 |
| -{'id': '838j9id2xmmwew', 'name': 'test', 'templateId': 'cx829zvv9e', 'gpuIds': 'AMPERE_16', 'networkVolumeId': None, 'locations': None, 'idleTimeout': 5, 'scalerType': 'QUEUE_DELAY', 'scalerValue': 4, 'workersMin': 0, 'workersMax': 1} |
| 138 | +[ |
| 139 | + { |
| 140 | + "id": "NVIDIA A100 80GB PCIe", |
| 141 | + "displayName": "A100 80GB", |
| 142 | + "memoryInGb": 80 |
| 143 | + }, |
| 144 | + { |
| 145 | + "id": "NVIDIA A100-SXM4-80GB", |
| 146 | + "displayName": "A100 SXM 80GB", |
| 147 | + "memoryInGb": 80 |
| 148 | + } |
| 149 | + // Additional GPUs omitted for brevity |
| 150 | +] |
78 | 151 | ```
|
| 152 | + |
| 153 | +</TabItem> |
| 154 | +</Tabs> |
| 155 | + |
| 156 | +## Get GPU by ID |
| 157 | + |
| 158 | +Pass a GPU ID to `get_gpu()` to retrieve details about a specific GPU model. |
| 159 | +This is useful when understanding the capabilities and costs associated with various GPU models. |
| 160 | + |
| 161 | +<Tabs> |
| 162 | + <TabItem value="python" label="Python" default> |
| 163 | + |
| 164 | +```python |
| 165 | +import runpod |
| 166 | +import json |
| 167 | + |
| 168 | +gpus = runpod.get_gpu("NVIDIA A100 80GB PCIe") |
| 169 | + |
| 170 | +print(json.dumps(gpus, indent=2)) |
| 171 | +``` |
| 172 | + |
| 173 | +</TabItem> |
| 174 | + <TabItem value="output" label="Output"> |
| 175 | + |
| 176 | +```json |
| 177 | +{ |
| 178 | + "maxGpuCount": 8, |
| 179 | + "id": "NVIDIA A100 80GB PCIe", |
| 180 | + "displayName": "A100 80GB", |
| 181 | + "manufacturer": "Nvidia", |
| 182 | + "memoryInGb": 80, |
| 183 | + "cudaCores": 0, |
| 184 | + "secureCloud": true, |
| 185 | + "communityCloud": true, |
| 186 | + "securePrice": 1.89, |
| 187 | + "communityPrice": 1.59, |
| 188 | + "oneMonthPrice": null, |
| 189 | + "threeMonthPrice": null, |
| 190 | + "oneWeekPrice": null, |
| 191 | + "communitySpotPrice": 0.89, |
| 192 | + "secureSpotPrice": null, |
| 193 | + "lowestPrice": { |
| 194 | + "minimumBidPrice": 0.89, |
| 195 | + "uninterruptablePrice": 1.59 |
| 196 | + } |
| 197 | +} |
| 198 | +``` |
| 199 | + |
| 200 | +</TabItem> |
| 201 | + |
| 202 | +</Tabs> |
| 203 | + |
| 204 | +Through these functionalities, the RunPod API enables efficient and flexible management of computational resources, catering to a wide range of project requirements. |