-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcuda_test.py
More file actions
58 lines (42 loc) · 1.76 KB
/
cuda_test.py
File metadata and controls
58 lines (42 loc) · 1.76 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
# pip install torch torchvision torchaudio
# pip install numpy
# pip install psutil
# pip3 install --user gputil
# python3 cuda_test.py
import torch
def check_cuda_status(device_index: int = 0) -> None:
    """Verify that PyTorch can see and use a CUDA GPU.

    Prints basic availability/device info, then runs a small matrix
    multiplication on the GPU and reports memory usage to confirm the
    device actually works.

    Args:
        device_index: Index of the CUDA device to inspect (default 0,
            matching the original hard-coded behavior).

    Returns:
        None. All results are reported via print().
    """
    print("--- 1. Basic Status Check ---")
    # Guard clause: bail out early if PyTorch cannot see any CUDA device.
    if not torch.cuda.is_available():
        print("❌ CUDA is NOT available. Check drivers/PyTorch installation.")
        return
    print("✅ CUDA is available and recognized by PyTorch.")

    device_count = torch.cuda.device_count()
    print(f"Total CUDA Devices Found: {device_count}")
    # Validate the requested index instead of raising deep inside torch.
    if not 0 <= device_index < device_count:
        print(f"❌ Device index {device_index} is out of range (0..{device_count - 1}).")
        return

    device_name = torch.cuda.get_device_name(device_index)
    print(f"Device Name (Device {device_index}): {device_name}")
    major, minor = torch.cuda.get_device_capability(device_index)
    print(f"Compute Capability: {major}.{minor}")

    # Target the specific device rather than the bare "cuda" default.
    device = torch.device(f"cuda:{device_index}")
    print("\n--- 2. Simple Computation Test ---")
    # Create two random 5x5 matrices directly on the GPU and multiply them.
    a = torch.rand(5, 5, device=device)
    b = torch.rand(5, 5, device=device)
    c = torch.matmul(a, b)
    # Verify the operands and the result actually live on the GPU.
    print(f"Tensor A Device: {a.device}")
    print(f"Result Tensor C Device: {c.device}")
    print(f"Result C (on GPU, 5x5 Matrix):\n{c}")

    # Memory stats (in GiB) confirm the allocation happened on this device.
    allocated = round(torch.cuda.memory_allocated(device_index) / 1024**3, 2)
    cached = round(torch.cuda.memory_reserved(device_index) / 1024**3, 2)
    print(f"\nGPU Memory Allocated: {allocated} GB")
    print(f"GPU Memory Cached: {cached} GB")
    print("\n✅ CUDA computation successful.")
# Run the diagnostic only when executed as a script, not when imported.
if __name__ == "__main__":
    check_cuda_status()