diff --git a/examples/pytorch/mnist-ddp/mnist.ipynb b/examples/pytorch/mnist-ddp/mnist.ipynb new file mode 100644 index 0000000000..d361576192 --- /dev/null +++ b/examples/pytorch/mnist-ddp/mnist.ipynb @@ -0,0 +1,631 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "# PyTorch DDP Fashion MNIST Training Example" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "This example demonstrates how to train a convolutional neural network to classify images using the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset and [PyTorch DDP](https://pytorch.org/tutorials/intermediate/ddp_tutorial.html)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "## Install the Kubeflow SDK" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO (astefanutti): Change to the Kubeflow SDK when it's available.\n", + "!pip install git+https://github.com/kubeflow/training-operator.git@master#subdirectory=sdk_v2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install the PyTorch dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install torch==2.5.1\n", + "!pip install torchvision==0.20.1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define the training function" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [], + "source": [ + "def train_fashion_mnist(params):\n", + " import os\n", + "\n", + " import torch\n", + " import torch.distributed as dist\n", + " import torch.nn.functional as F\n", + " import torch.nn as nn\n", + " import torchvision.transforms as transforms\n", + " from torch.nn.parallel import DistributedDataParallel\n", + " from torch.optim.lr_scheduler import StepLR\n", + " from torch.utils.data import DataLoader\n", + " from torch.utils.data.distributed import DistributedSampler\n", + " from torchvision.datasets import FashionMNIST\n", + "\n", + " # Define the PyTorch CNN model to be trained\n", + " class Net(nn.Module):\n", + " def __init__(self):\n", + " super(Net, self).__init__()\n", + " self.conv1 = nn.Conv2d(1, 20, 5, 1)\n", + " self.conv2 = nn.Conv2d(20, 50, 5, 1)\n", + " self.fc1 = nn.Linear(4 * 4 * 50, 500)\n", + " self.fc2 = nn.Linear(500, 10)\n", + "\n", + " def forward(self, x):\n", + " x = F.relu(self.conv1(x))\n", + " x = F.max_pool2d(x, 2, 2)\n", + " x = F.relu(self.conv2(x))\n", + " x = F.max_pool2d(x, 2, 2)\n", + " x = x.view(-1, 4 * 4 * 50)\n", + " x = F.relu(self.fc1(x))\n", + " x = self.fc2(x)\n", + " return F.log_softmax(x, dim=1)\n", + "\n", + " # Use NCCL if a GPU is available, otherwise use Gloo as the communication backend\n", + " device, backend = (\"cuda\", \"nccl\") if torch.cuda.is_available() else (\"cpu\", \"gloo\")\n", + "\n", + " print(f\"Using Device: {device}, Backend: {backend}\")\n", + "\n", + " # Set up PyTorch Distributed\n", + " local_rank = int(os.getenv(\"LOCAL_RANK\", 0))\n", + " dist.init_process_group(backend=backend)\n", + "\n", + " print(\n", + " \"Distributed Training for WORLD_SIZE: {}, RANK: {}, LOCAL_RANK: {}\".format(\n", + " dist.get_world_size(),\n", + " dist.get_rank(),\n", + " local_rank,\n", + " )\n", + " )\n", + "\n", + " # Create the model and move it to the device\n",
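 + " # Each process uses the accelerator that matches its local rank on the node,\n", + " # and DistributedDataParallel averages the gradients across all the workers\n",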
torch.device(f\"{device}:{local_rank}\")\n", + " model = DistributedDataParallel(Net().to(device))\n", + "\n", + " # Retrieve the Fashion-MNIST dataset\n", + " dataset = FashionMNIST(\n", + " \"./data\",\n", + " train=True,\n", + " download=True,\n", + " transform=transforms.Compose([transforms.ToTensor()]),\n", + " )\n", + "\n", + " # Shard the dataset accross workers\n", + " train_loader = DataLoader(\n", + " dataset,\n", + " batch_size=100,\n", + " sampler=DistributedSampler(dataset),\n", + " pin_memory=torch.cuda.is_available(),\n", + " )\n", + "\n", + " # Setup the optimization loop\n", + " criterion = nn.CrossEntropyLoss().to(device)\n", + " optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n", + " scheduler = StepLR(optimizer, step_size=10, gamma=0.8)\n", + "\n", + " for epoch in range(1, params.get(\"epochs\", 1) + 1):\n", + " model.train()\n", + "\n", + " # Iterate over mini-batches from the training set\n", + " for batch_idx, (inputs, labels) in enumerate(train_loader):\n", + " # Copy the data to the GPU device if available\n", + " inputs, labels = inputs.to(device), labels.to(device)\n", + " # Forward pass\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, labels)\n", + " # Backward pass\n", + " optimizer.zero_grad()\n", + " loss.backward()\n", + " optimizer.step()\n", + "\n", + " if batch_idx % 10 == 0 and dist.get_rank() == 0:\n", + " print(\n", + " \"Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\".format(\n", + " epoch,\n", + " batch_idx * len(inputs),\n", + " len(train_loader.dataset),\n", + " 100.0 * batch_idx / len(train_loader),\n", + " loss.item(),\n", + " )\n", + " )\n", + "\n", + " scheduler.step()\n", + "\n", + " # Wait for the distributed training to complete\n", + " dist.barrier()\n", + " if dist.get_rank() == 0:\n", + " print(\"Training is finished\")\n", + "\n", + " # Finally clean up PyTorch distributed\n", + " dist.destroy_process_group()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Dry-run the training locally" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Using Device: cpu, Backend: gloo\n", + "Distributed Training for WORLD_SIZE: 1, RANK: 0, LOCAL_RANK: 0\n", + "Train Epoch: 1 [0/60000 (0%)]\tLoss: 2.299584\n", + "Train Epoch: 1 [1000/60000 (2%)]\tLoss: 1.937619\n", + "Train Epoch: 1 [2000/60000 (3%)]\tLoss: 1.588631\n", + "Train Epoch: 1 [3000/60000 (5%)]\tLoss: 1.564573\n", + "Train Epoch: 1 [4000/60000 (7%)]\tLoss: 1.092749\n", + "Train Epoch: 1 [5000/60000 (8%)]\tLoss: 0.843637\n", + "Train Epoch: 1 [6000/60000 (10%)]\tLoss: 0.665307\n", + "Train Epoch: 1 [7000/60000 (12%)]\tLoss: 0.741183\n", + "Train Epoch: 1 [8000/60000 (13%)]\tLoss: 0.628587\n", + "Train Epoch: 1 [9000/60000 (15%)]\tLoss: 0.750320\n", + "Train Epoch: 1 [10000/60000 (17%)]\tLoss: 0.610109\n", + "Train Epoch: 1 [11000/60000 (18%)]\tLoss: 0.551585\n", + "Train Epoch: 1 [12000/60000 (20%)]\tLoss: 0.605077\n", + "Train Epoch: 1 [13000/60000 (22%)]\tLoss: 0.539692\n", + "Train Epoch: 1 [14000/60000 (23%)]\tLoss: 0.666732\n", + "Train Epoch: 1 [15000/60000 (25%)]\tLoss: 0.646964\n", + "Train Epoch: 1 [16000/60000 (27%)]\tLoss: 0.492674\n", + "Train Epoch: 1 [17000/60000 (28%)]\tLoss: 0.491275\n", + "Train Epoch: 1 [18000/60000 (30%)]\tLoss: 0.396012\n", + "Train Epoch: 1 [19000/60000 (32%)]\tLoss: 0.526682\n", + "Train Epoch: 1 [20000/60000 (33%)]\tLoss: 0.452366\n", + "Train Epoch: 1 [21000/60000 
(35%)]\tLoss: 0.408348\n", + "Train Epoch: 1 [22000/60000 (37%)]\tLoss: 0.366564\n", + "Train Epoch: 1 [23000/60000 (38%)]\tLoss: 0.535975\n", + "Train Epoch: 1 [24000/60000 (40%)]\tLoss: 0.466486\n", + "Train Epoch: 1 [25000/60000 (42%)]\tLoss: 0.485761\n", + "Train Epoch: 1 [26000/60000 (43%)]\tLoss: 0.641456\n", + "Train Epoch: 1 [27000/60000 (45%)]\tLoss: 0.389193\n", + "Train Epoch: 1 [28000/60000 (47%)]\tLoss: 0.374947\n", + "Train Epoch: 1 [29000/60000 (48%)]\tLoss: 0.366143\n", + "Train Epoch: 1 [30000/60000 (50%)]\tLoss: 0.393355\n", + "Train Epoch: 1 [31000/60000 (52%)]\tLoss: 0.425496\n", + "Train Epoch: 1 [32000/60000 (53%)]\tLoss: 0.377846\n", + "Train Epoch: 1 [33000/60000 (55%)]\tLoss: 0.317635\n", + "Train Epoch: 1 [34000/60000 (57%)]\tLoss: 0.527301\n", + "Train Epoch: 1 [35000/60000 (58%)]\tLoss: 0.592480\n", + "Train Epoch: 1 [36000/60000 (60%)]\tLoss: 0.372165\n", + "Train Epoch: 1 [37000/60000 (62%)]\tLoss: 0.404120\n", + "Train Epoch: 1 [38000/60000 (63%)]\tLoss: 0.468133\n", + "Train Epoch: 1 [39000/60000 (65%)]\tLoss: 0.522341\n", + "Train Epoch: 1 [40000/60000 (67%)]\tLoss: 0.402095\n", + "Train Epoch: 1 [41000/60000 (68%)]\tLoss: 0.514725\n", + "Train Epoch: 1 [42000/60000 (70%)]\tLoss: 0.437190\n", + "Train Epoch: 1 [43000/60000 (72%)]\tLoss: 0.373140\n", + "Train Epoch: 1 [44000/60000 (73%)]\tLoss: 0.339573\n", + "Train Epoch: 1 [45000/60000 (75%)]\tLoss: 0.256387\n", + "Train Epoch: 1 [46000/60000 (77%)]\tLoss: 0.459226\n", + "Train Epoch: 1 [47000/60000 (78%)]\tLoss: 0.408054\n", + "Train Epoch: 1 [48000/60000 (80%)]\tLoss: 0.460252\n", + "Train Epoch: 1 [49000/60000 (82%)]\tLoss: 0.373420\n", + "Train Epoch: 1 [50000/60000 (83%)]\tLoss: 0.295203\n", + "Train Epoch: 1 [51000/60000 (85%)]\tLoss: 0.474386\n", + "Train Epoch: 1 [52000/60000 (87%)]\tLoss: 0.411164\n", + "Train Epoch: 1 [53000/60000 (88%)]\tLoss: 0.359452\n", + "Train Epoch: 1 [54000/60000 (90%)]\tLoss: 0.309372\n", + "Train Epoch: 1 [55000/60000 (92%)]\tLoss: 0.356350\n", + "Train Epoch: 1 [56000/60000 (93%)]\tLoss: 0.363343\n", + "Train Epoch: 1 [57000/60000 (95%)]\tLoss: 0.482411\n", + "Train Epoch: 1 [58000/60000 (97%)]\tLoss: 0.353806\n", + "Train Epoch: 1 [59000/60000 (98%)]\tLoss: 0.274754\n", + "Training is finished\n" + ] + } + ], + "source": [ + "import os\n", + "\n", + "# Set the Torch Distributed env variables so the training function can be run in the notebook\n", + "# See https://pytorch.org/docs/stable/elastic/run.html#environment-variables\n", + "os.environ[\"RANK\"] = \"0\"\n", + "os.environ[\"LOCAL_RANK\"] = \"0\"\n", + "os.environ[\"WORLD_SIZE\"] = \"1\"\n", + "os.environ[\"MASTER_ADDR\"] = \"localhost\"\n", + "os.environ[\"MASTER_PORT\"] = \"1234\"\n", + "\n", + "# Run the training function locally\n", + "train_fashion_mnist({\"epochs\": 1})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create the Kubeflow Training Client" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from kubeflow.training import Trainer, TrainingClient\n", + "client = TrainingClient()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## List the training runtimes" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Runtime(name='torch-distributed', phase='pre-training', accelerator='Unknown', accelerator_count='Unknown')\n" + ] + } + ], + 
"source": [ + "for runtime in client.list_runtimes():\n", + " print(runtime)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run the distributed training Job" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "metadata": {}, + "outputs": [], + "source": [ + "job_name = client.train(\n", + " # Use one the of the training runtimes installed on your Kubernetes cluster\n", + " runtime_ref=\"torch-distributed\",\n", + " trainer=Trainer(\n", + " func=train_fashion_mnist,\n", + " func_args={\n", + " \"epochs\": 10,\n", + " },\n", + " # Set how many worker Pods you want the job to be distributed into\n", + " num_nodes=4,\n", + " # Set the resources for each worker Pod\n", + " resources_per_node={\n", + " \"cpu\": 8,\n", + " \"memory\": \"16Gi\",\n", + " \"nvidia.com/gpu\": 1,\n", + " },\n", + " ),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Check the training job components" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "TrainJob(name='u595a4071b5b', runtime_ref='torch-distributed', creation_timestamp=datetime.datetime(2025, 1, 28, 18, 7, 56, tzinfo=tzutc()), components=[Component(name='trainer-node-0', status='Pending', device='gpu', device_count='1', pod_name='u595a4071b5b-trainer-node-0-0-dpqbq'), Component(name='trainer-node-1', status='Pending', device='gpu', device_count='1', pod_name='u595a4071b5b-trainer-node-0-1-js6z8'), Component(name='trainer-node-2', status='Pending', device='gpu', device_count='1', pod_name='u595a4071b5b-trainer-node-0-2-ft7l5'), Component(name='trainer-node-3', status='Pending', device='gpu', device_count='1', pod_name='u595a4071b5b-trainer-node-0-3-7brgp')], status='Created')\n" + ] + } + ], + "source": [ + "job = client.get_job(job_name)\n", + "print(job)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Watch the training job logs" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[trainer-node]: Using Device: cuda, Backend: nccl\n", + "[trainer-node]: Distributed Training for WORLD_SIZE: 4, RANK: 0, LOCAL_RANK: 0\n", + "[trainer-node]: Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz\n", + "[trainer-node]: Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz to ./data/FashionMNIST/raw/train-images-idx3-ubyte.gz\n", + "100%|██████████| 26.4M/26.4M [00:01<00:00, 13.8MB/s]\n", + "[trainer-node]: Extracting ./data/FashionMNIST/raw/train-images-idx3-ubyte.gz to ./data/FashionMNIST/raw\n", + "[trainer-node]: Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz\n", + "[trainer-node]: Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz to ./data/FashionMNIST/raw/train-labels-idx1-ubyte.gz\n", + "100%|██████████| 29.5k/29.5k [00:00<00:00, 327kB/s]\n", + "[trainer-node]: Extracting ./data/FashionMNIST/raw/train-labels-idx1-ubyte.gz to ./data/FashionMNIST/raw\n", + "[trainer-node]: Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz\n", + "[trainer-node]: Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz to ./data/FashionMNIST/raw/t10k-images-idx3-ubyte.gz\n", + 
"100%|██████████| 4.42M/4.42M [00:00<00:00, 6.11MB/s]\n", + "[trainer-node]: Extracting ./data/FashionMNIST/raw/t10k-images-idx3-ubyte.gz to ./data/FashionMNIST/raw\n", + "[trainer-node]: Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz\n", + "[trainer-node]: Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz to ./data/FashionMNIST/raw/t10k-labels-idx1-ubyte.gz\n", + "100%|██████████| 5.15k/5.15k [00:00<00:00, 49.0MB/s]\n", + "[trainer-node]: Extracting ./data/FashionMNIST/raw/t10k-labels-idx1-ubyte.gz to ./data/FashionMNIST/raw\n", + "[trainer-node]: Train Epoch: 1 [0/60000 (0%)]\tLoss: 2.298429\n", + "[trainer-node]: Train Epoch: 1 [1000/60000 (7%)]\tLoss: 1.735639\n", + "[trainer-node]: Train Epoch: 1 [2000/60000 (13%)]\tLoss: 1.908234\n", + "[trainer-node]: Train Epoch: 1 [3000/60000 (20%)]\tLoss: 1.296376\n", + "[trainer-node]: Train Epoch: 1 [4000/60000 (27%)]\tLoss: 1.194973\n", + "[trainer-node]: Train Epoch: 1 [5000/60000 (33%)]\tLoss: 0.952793\n", + "[trainer-node]: Train Epoch: 1 [6000/60000 (40%)]\tLoss: 0.752052\n", + "[trainer-node]: Train Epoch: 1 [7000/60000 (47%)]\tLoss: 0.718148\n", + "[trainer-node]: Train Epoch: 1 [8000/60000 (53%)]\tLoss: 0.475267\n", + "[trainer-node]: Train Epoch: 1 [9000/60000 (60%)]\tLoss: 0.646854\n", + "[trainer-node]: Train Epoch: 1 [10000/60000 (67%)]\tLoss: 0.413865\n", + "[trainer-node]: Train Epoch: 1 [11000/60000 (73%)]\tLoss: 0.551079\n", + "[trainer-node]: Train Epoch: 1 [12000/60000 (80%)]\tLoss: 0.446894\n", + "[trainer-node]: Train Epoch: 1 [13000/60000 (87%)]\tLoss: 0.527980\n", + "[trainer-node]: Train Epoch: 1 [14000/60000 (93%)]\tLoss: 0.489114\n", + "[trainer-node]: Train Epoch: 2 [0/60000 (0%)]\tLoss: 0.488229\n", + "[trainer-node]: Train Epoch: 2 [1000/60000 (7%)]\tLoss: 0.426046\n", + "[trainer-node]: Train Epoch: 2 [2000/60000 (13%)]\tLoss: 0.466134\n", + "[trainer-node]: Train Epoch: 2 [3000/60000 (20%)]\tLoss: 0.385700\n", + "[trainer-node]: Train Epoch: 2 [4000/60000 (27%)]\tLoss: 0.404375\n", + "[trainer-node]: Train Epoch: 2 [5000/60000 (33%)]\tLoss: 0.361363\n", + "[trainer-node]: Train Epoch: 2 [6000/60000 (40%)]\tLoss: 0.491555\n", + "[trainer-node]: Train Epoch: 2 [7000/60000 (47%)]\tLoss: 0.462858\n", + "[trainer-node]: Train Epoch: 2 [8000/60000 (53%)]\tLoss: 0.327273\n", + "[trainer-node]: Train Epoch: 2 [9000/60000 (60%)]\tLoss: 0.441063\n", + "[trainer-node]: Train Epoch: 2 [10000/60000 (67%)]\tLoss: 0.292195\n", + "[trainer-node]: Train Epoch: 2 [11000/60000 (73%)]\tLoss: 0.423418\n", + "[trainer-node]: Train Epoch: 2 [12000/60000 (80%)]\tLoss: 0.301645\n", + "[trainer-node]: Train Epoch: 2 [13000/60000 (87%)]\tLoss: 0.317801\n", + "[trainer-node]: Train Epoch: 2 [14000/60000 (93%)]\tLoss: 0.282251\n", + "[trainer-node]: Train Epoch: 3 [0/60000 (0%)]\tLoss: 0.386921\n", + "[trainer-node]: Train Epoch: 3 [1000/60000 (7%)]\tLoss: 0.317989\n", + "[trainer-node]: Train Epoch: 3 [2000/60000 (13%)]\tLoss: 0.386306\n", + "[trainer-node]: Train Epoch: 3 [3000/60000 (20%)]\tLoss: 0.307866\n", + "[trainer-node]: Train Epoch: 3 [4000/60000 (27%)]\tLoss: 0.316709\n", + "[trainer-node]: Train Epoch: 3 [5000/60000 (33%)]\tLoss: 0.251497\n", + "[trainer-node]: Train Epoch: 3 [6000/60000 (40%)]\tLoss: 0.440700\n", + "[trainer-node]: Train Epoch: 3 [7000/60000 (47%)]\tLoss: 0.470727\n", + "[trainer-node]: Train Epoch: 3 [8000/60000 (53%)]\tLoss: 0.295208\n", + "[trainer-node]: Train Epoch: 3 [9000/60000 (60%)]\tLoss: 0.323202\n", + 
"[trainer-node]: Train Epoch: 3 [10000/60000 (67%)]\tLoss: 0.223106\n", + "[trainer-node]: Train Epoch: 3 [11000/60000 (73%)]\tLoss: 0.304875\n", + "[trainer-node]: Train Epoch: 3 [12000/60000 (80%)]\tLoss: 0.280195\n", + "[trainer-node]: Train Epoch: 3 [13000/60000 (87%)]\tLoss: 0.282972\n", + "[trainer-node]: Train Epoch: 3 [14000/60000 (93%)]\tLoss: 0.221616\n", + "[trainer-node]: Train Epoch: 4 [0/60000 (0%)]\tLoss: 0.346582\n", + "[trainer-node]: Train Epoch: 4 [1000/60000 (7%)]\tLoss: 0.294129\n", + "[trainer-node]: Train Epoch: 4 [2000/60000 (13%)]\tLoss: 0.361423\n", + "[trainer-node]: Train Epoch: 4 [3000/60000 (20%)]\tLoss: 0.217486\n", + "[trainer-node]: Train Epoch: 4 [4000/60000 (27%)]\tLoss: 0.286454\n", + "[trainer-node]: Train Epoch: 4 [5000/60000 (33%)]\tLoss: 0.270020\n", + "[trainer-node]: Train Epoch: 4 [6000/60000 (40%)]\tLoss: 0.398067\n", + "[trainer-node]: Train Epoch: 4 [7000/60000 (47%)]\tLoss: 0.429407\n", + "[trainer-node]: Train Epoch: 4 [8000/60000 (53%)]\tLoss: 0.292985\n", + "[trainer-node]: Train Epoch: 4 [9000/60000 (60%)]\tLoss: 0.235372\n", + "[trainer-node]: Train Epoch: 4 [10000/60000 (67%)]\tLoss: 0.215633\n", + "[trainer-node]: Train Epoch: 4 [11000/60000 (73%)]\tLoss: 0.264095\n", + "[trainer-node]: Train Epoch: 4 [12000/60000 (80%)]\tLoss: 0.247432\n", + "[trainer-node]: Train Epoch: 4 [13000/60000 (87%)]\tLoss: 0.282042\n", + "[trainer-node]: Train Epoch: 4 [14000/60000 (93%)]\tLoss: 0.271429\n", + "[trainer-node]: Train Epoch: 5 [0/60000 (0%)]\tLoss: 0.330857\n", + "[trainer-node]: Train Epoch: 5 [1000/60000 (7%)]\tLoss: 0.273011\n", + "[trainer-node]: Train Epoch: 5 [2000/60000 (13%)]\tLoss: 0.383851\n", + "[trainer-node]: Train Epoch: 5 [3000/60000 (20%)]\tLoss: 0.198827\n", + "[trainer-node]: Train Epoch: 5 [4000/60000 (27%)]\tLoss: 0.255200\n", + "[trainer-node]: Train Epoch: 5 [5000/60000 (33%)]\tLoss: 0.245633\n", + "[trainer-node]: Train Epoch: 5 [6000/60000 (40%)]\tLoss: 0.391332\n", + "[trainer-node]: Train Epoch: 5 [7000/60000 (47%)]\tLoss: 0.349970\n", + "[trainer-node]: Train Epoch: 5 [8000/60000 (53%)]\tLoss: 0.229194\n", + "[trainer-node]: Train Epoch: 5 [9000/60000 (60%)]\tLoss: 0.222382\n", + "[trainer-node]: Train Epoch: 5 [10000/60000 (67%)]\tLoss: 0.192989\n", + "[trainer-node]: Train Epoch: 5 [11000/60000 (73%)]\tLoss: 0.230613\n", + "[trainer-node]: Train Epoch: 5 [12000/60000 (80%)]\tLoss: 0.210475\n", + "[trainer-node]: Train Epoch: 5 [13000/60000 (87%)]\tLoss: 0.260190\n", + "[trainer-node]: Train Epoch: 5 [14000/60000 (93%)]\tLoss: 0.225126\n", + "[trainer-node]: Train Epoch: 6 [0/60000 (0%)]\tLoss: 0.276963\n", + "[trainer-node]: Train Epoch: 6 [1000/60000 (7%)]\tLoss: 0.310329\n", + "[trainer-node]: Train Epoch: 6 [2000/60000 (13%)]\tLoss: 0.363408\n", + "[trainer-node]: Train Epoch: 6 [3000/60000 (20%)]\tLoss: 0.210243\n", + "[trainer-node]: Train Epoch: 6 [4000/60000 (27%)]\tLoss: 0.213673\n", + "[trainer-node]: Train Epoch: 6 [5000/60000 (33%)]\tLoss: 0.245481\n", + "[trainer-node]: Train Epoch: 6 [6000/60000 (40%)]\tLoss: 0.374717\n", + "[trainer-node]: Train Epoch: 6 [7000/60000 (47%)]\tLoss: 0.308872\n", + "[trainer-node]: Train Epoch: 6 [8000/60000 (53%)]\tLoss: 0.183813\n", + "[trainer-node]: Train Epoch: 6 [9000/60000 (60%)]\tLoss: 0.223055\n", + "[trainer-node]: Train Epoch: 6 [10000/60000 (67%)]\tLoss: 0.211464\n", + "[trainer-node]: Train Epoch: 6 [11000/60000 (73%)]\tLoss: 0.222907\n", + "[trainer-node]: Train Epoch: 6 [12000/60000 (80%)]\tLoss: 0.194615\n", + "[trainer-node]: Train Epoch: 6 [13000/60000 
(87%)]\tLoss: 0.229100\n", + "[trainer-node]: Train Epoch: 6 [14000/60000 (93%)]\tLoss: 0.196409\n", + "[trainer-node]: Train Epoch: 7 [0/60000 (0%)]\tLoss: 0.208812\n", + "[trainer-node]: Train Epoch: 7 [1000/60000 (7%)]\tLoss: 0.314821\n", + "[trainer-node]: Train Epoch: 7 [2000/60000 (13%)]\tLoss: 0.294604\n", + "[trainer-node]: Train Epoch: 7 [3000/60000 (20%)]\tLoss: 0.228930\n", + "[trainer-node]: Train Epoch: 7 [4000/60000 (27%)]\tLoss: 0.213625\n", + "[trainer-node]: Train Epoch: 7 [5000/60000 (33%)]\tLoss: 0.222754\n", + "[trainer-node]: Train Epoch: 7 [6000/60000 (40%)]\tLoss: 0.378021\n", + "[trainer-node]: Train Epoch: 7 [7000/60000 (47%)]\tLoss: 0.313782\n", + "[trainer-node]: Train Epoch: 7 [8000/60000 (53%)]\tLoss: 0.166859\n", + "[trainer-node]: Train Epoch: 7 [9000/60000 (60%)]\tLoss: 0.204268\n", + "[trainer-node]: Train Epoch: 7 [10000/60000 (67%)]\tLoss: 0.203014\n", + "[trainer-node]: Train Epoch: 7 [11000/60000 (73%)]\tLoss: 0.195203\n", + "[trainer-node]: Train Epoch: 7 [12000/60000 (80%)]\tLoss: 0.172944\n", + "[trainer-node]: Train Epoch: 7 [13000/60000 (87%)]\tLoss: 0.215562\n", + "[trainer-node]: Train Epoch: 7 [14000/60000 (93%)]\tLoss: 0.174588\n", + "[trainer-node]: Train Epoch: 8 [0/60000 (0%)]\tLoss: 0.199743\n", + "[trainer-node]: Train Epoch: 8 [1000/60000 (7%)]\tLoss: 0.302387\n", + "[trainer-node]: Train Epoch: 8 [2000/60000 (13%)]\tLoss: 0.281316\n", + "[trainer-node]: Train Epoch: 8 [3000/60000 (20%)]\tLoss: 0.248879\n", + "[trainer-node]: Train Epoch: 8 [4000/60000 (27%)]\tLoss: 0.194515\n", + "[trainer-node]: Train Epoch: 8 [5000/60000 (33%)]\tLoss: 0.215790\n", + "[trainer-node]: Train Epoch: 8 [6000/60000 (40%)]\tLoss: 0.328851\n", + "[trainer-node]: Train Epoch: 8 [7000/60000 (47%)]\tLoss: 0.268875\n", + "[trainer-node]: Train Epoch: 8 [8000/60000 (53%)]\tLoss: 0.157025\n", + "[trainer-node]: Train Epoch: 8 [9000/60000 (60%)]\tLoss: 0.176993\n", + "[trainer-node]: Train Epoch: 8 [10000/60000 (67%)]\tLoss: 0.211727\n", + "[trainer-node]: Train Epoch: 8 [11000/60000 (73%)]\tLoss: 0.199225\n", + "[trainer-node]: Train Epoch: 8 [12000/60000 (80%)]\tLoss: 0.162694\n", + "[trainer-node]: Train Epoch: 8 [13000/60000 (87%)]\tLoss: 0.231342\n", + "[trainer-node]: Train Epoch: 8 [14000/60000 (93%)]\tLoss: 0.175332\n", + "[trainer-node]: Train Epoch: 9 [0/60000 (0%)]\tLoss: 0.198400\n", + "[trainer-node]: Train Epoch: 9 [1000/60000 (7%)]\tLoss: 0.306376\n", + "[trainer-node]: Train Epoch: 9 [2000/60000 (13%)]\tLoss: 0.287058\n", + "[trainer-node]: Train Epoch: 9 [3000/60000 (20%)]\tLoss: 0.220416\n", + "[trainer-node]: Train Epoch: 9 [4000/60000 (27%)]\tLoss: 0.159419\n", + "[trainer-node]: Train Epoch: 9 [5000/60000 (33%)]\tLoss: 0.198032\n", + "[trainer-node]: Train Epoch: 9 [6000/60000 (40%)]\tLoss: 0.294416\n", + "[trainer-node]: Train Epoch: 9 [7000/60000 (47%)]\tLoss: 0.263291\n", + "[trainer-node]: Train Epoch: 9 [8000/60000 (53%)]\tLoss: 0.163996\n", + "[trainer-node]: Train Epoch: 9 [9000/60000 (60%)]\tLoss: 0.167783\n", + "[trainer-node]: Train Epoch: 9 [10000/60000 (67%)]\tLoss: 0.194410\n", + "[trainer-node]: Train Epoch: 9 [11000/60000 (73%)]\tLoss: 0.174011\n", + "[trainer-node]: Train Epoch: 9 [12000/60000 (80%)]\tLoss: 0.133021\n", + "[trainer-node]: Train Epoch: 9 [13000/60000 (87%)]\tLoss: 0.214966\n", + "[trainer-node]: Train Epoch: 9 [14000/60000 (93%)]\tLoss: 0.140199\n", + "[trainer-node]: Train Epoch: 10 [0/60000 (0%)]\tLoss: 0.181095\n", + "[trainer-node]: Train Epoch: 10 [1000/60000 (7%)]\tLoss: 0.299705\n", + "[trainer-node]: Train 
Epoch: 10 [2000/60000 (13%)]\tLoss: 0.259530\n", + "[trainer-node]: Train Epoch: 10 [3000/60000 (20%)]\tLoss: 0.238278\n", + "[trainer-node]: Train Epoch: 10 [4000/60000 (27%)]\tLoss: 0.154977\n", + "[trainer-node]: Train Epoch: 10 [5000/60000 (33%)]\tLoss: 0.207992\n", + "[trainer-node]: Train Epoch: 10 [6000/60000 (40%)]\tLoss: 0.271387\n", + "[trainer-node]: Train Epoch: 10 [7000/60000 (47%)]\tLoss: 0.244513\n", + "[trainer-node]: Train Epoch: 10 [8000/60000 (53%)]\tLoss: 0.187271\n", + "[trainer-node]: Train Epoch: 10 [9000/60000 (60%)]\tLoss: 0.155416\n", + "[trainer-node]: Train Epoch: 10 [10000/60000 (67%)]\tLoss: 0.185268\n", + "[trainer-node]: Train Epoch: 10 [11000/60000 (73%)]\tLoss: 0.174248\n", + "[trainer-node]: Train Epoch: 10 [12000/60000 (80%)]\tLoss: 0.137199\n", + "[trainer-node]: Train Epoch: 10 [13000/60000 (87%)]\tLoss: 0.209850\n", + "[trainer-node]: Train Epoch: 10 [14000/60000 (93%)]\tLoss: 0.136205\n", + "[trainer-node]: Training is finished\n" + ] + }, + { + "data": { + "text/plain": [ + "{'trainer-node': 'Using Device: cuda, Backend: nccl\\nDistributed Training for WORLD_SIZE: 4, RANK: 0, LOCAL_RANK: 0\\nDownloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz\\nDownloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz to ./data/FashionMNIST/raw/train-images-idx3-ubyte.gz\\n\\r 0%| | 0.00/26.4M [00:00