
Add xpu support to mnist examples #1309

Closed
5 changes: 5 additions & 0 deletions mnist/main.py
@@ -86,6 +86,8 @@ def main():
                        help='disables CUDA training')
    parser.add_argument('--no-mps', action='store_true', default=False,
                        help='disables macOS GPU training')
    parser.add_argument('--no-xpu', action='store_true', default=False,
                        help='disables Intel GPU training')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
@@ -97,13 +99,16 @@
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    use_mps = not args.no_mps and torch.backends.mps.is_available()
    use_xpu = not args.no_xpu and torch.xpu.is_available()

    torch.manual_seed(args.seed)

    if use_cuda:
        device = torch.device("cuda")
    elif use_mps:
        device = torch.device("mps")
    elif use_xpu:
        device = torch.device("xpu")
    else:
        device = torch.device("cpu")

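For orientation (not part of the diff), the cuda to mps to xpu to cpu fallback that this PR repeats across the examples can be written as a standalone snippet. The `pick_device` helper below is hypothetical, and a PyTorch build that exposes `torch.xpu` is assumed, guarded with `hasattr` for older builds:

```python
# Hedged sketch (not from the PR) of the device-selection order used in mnist/main.py above.
# pick_device() is a hypothetical helper; torch.xpu only exists in PyTorch builds
# with Intel GPU (XPU) support, hence the hasattr() guard.
import torch

def pick_device(no_cuda=False, no_mps=False, no_xpu=False):
    if not no_cuda and torch.cuda.is_available():
        return torch.device("cuda")
    if not no_mps and torch.backends.mps.is_available():
        return torch.device("mps")
    if not no_xpu and hasattr(torch, "xpu") and torch.xpu.is_available():
        return torch.device("xpu")
    return torch.device("cpu")

if __name__ == "__main__":
    device = pick_device()
    x = torch.randn(8, 10, device=device)  # the tensor lands on whichever device was picked
    print(device, x.device)
```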
1 change: 1 addition & 0 deletions mnist_forward_forward/README.md
@@ -17,6 +17,7 @@ optional arguments:
--epochs EPOCHS number of epochs to train (default: 1000)
--lr LR learning rate (default: 0.03)
--no_cuda disables CUDA training
--no_xpu disables XPU training
--no_mps disables MPS training
--seed SEED random seed (default: 1)
--save_model For saving the current Model
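A hedged usage note for this example (flag names taken from the listing above; the epoch count is arbitrary): Intel GPU training can be opted out of explicitly, so device selection falls through to the remaining backends:

```bash
python main.py --epochs 1 --no_xpu
```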
6 changes: 6 additions & 0 deletions mnist_forward_forward/main.py
@@ -104,6 +104,9 @@ def train(self, x_pos, x_neg):
    parser.add_argument(
        "--no_cuda", action="store_true", default=False, help="disables CUDA training"
    )
    parser.add_argument(
        "--no_xpu", action="store_true", default=False, help="disables XPU training"
    )
    parser.add_argument(
        "--no_mps", action="store_true", default=False, help="disables MPS training"
    )
@@ -138,9 +141,12 @@ def train(self, x_pos, x_neg):
    )
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    use_xpu = not args.no_xpu and torch.xpu.is_available()
    use_mps = not args.no_mps and torch.backends.mps.is_available()
    if use_cuda:
        device = torch.device("cuda")
    elif use_xpu:
        device = torch.device("xpu")
    elif use_mps:
        device = torch.device("mps")
    else:
1 change: 0 additions & 1 deletion mnist_hogwild/README.md
@@ -22,5 +22,4 @@ optional arguments:
--num_process how many training processes to use (default: 2)
--cuda enables CUDA training
--dry-run quickly check a single pass
--save-model For Saving the current Model
```
4 changes: 3 additions & 1 deletion mnist_hogwild/main.py
@@ -60,10 +60,12 @@ def forward(self, x):

    use_cuda = args.cuda and torch.cuda.is_available()
    use_mps = args.mps and torch.backends.mps.is_available()

    if use_cuda:
        device = torch.device("cuda")
    elif use_mps:
        device = torch.device("mps")

    else:
        device = torch.device("cpu")

@@ -81,7 +83,7 @@ def forward(self, x):
        kwargs.update({'num_workers': 1,
                       'pin_memory': True,
                      })

    torch.manual_seed(args.seed)
    mp.set_start_method('spawn', force=True)

18 changes: 18 additions & 0 deletions mnist_rnn/README.md
@@ -8,3 +8,21 @@ pip install -r requirements.txt
python main.py
# CUDA_VISIBLE_DEVICES=2 python main.py # to specify GPU id, e.g. 2
```
The main.py script accepts the following arguments:

```bash
optional arguments:
-h, --help show this help message and exit
  --batch_size          input batch size for training (default: 64)
--testing_batch_size input batch size for testing (default: 1000)
--epochs EPOCHS number of epochs to train (default: 14)
--lr LR learning rate (default: 0.1)
--gamma learning rate step gamma (default: 0.7)
--cuda enables CUDA training
--xpu enables XPU training
  --mps                 enables macOS GPU training
--seed SEED random seed (default: 1)
--save_model For saving the current Model
--log_interval how many batches to wait before logging training status
--dry-run quickly check a single pass
```
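A hedged example invocation (flag names taken from the listing above; a PyTorch build with Intel GPU/XPU support is assumed):

```bash
python main.py --xpu --epochs 1 --dry-run
```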
5 changes: 5 additions & 0 deletions mnist_rnn/main.py
@@ -93,6 +93,8 @@ def main():
                        help='learning rate step gamma (default: 0.7)')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    parser.add_argument('--xpu', action='store_true', default=False,
                        help='enables XPU training')
    parser.add_argument('--mps', action="store_true", default=False,
                        help="enables MPS training")
    parser.add_argument('--dry-run', action='store_true', default=False,
@@ -109,6 +111,8 @@
device = "cuda"
elif args.mps and not args.cuda:
device = "mps"
elif args.xpu:
device = "xpu"
else:
device = "cpu"

@@ -117,6 +121,7 @@
    torch.manual_seed(args.seed)

    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
16 changes: 16 additions & 0 deletions run_python_examples.sh
@@ -30,6 +30,22 @@ case $USE_CUDA in
;;
esac

USE_XPU=$(python -c "import torchvision, torch; print(torch.xpu.is_available())")
case $USE_XPU in
"True")
echo "using xpu"
XPU=1
XPU_FLAG="--xpu"
;;
"False")
echo "not using xpu"
XPU=0
XPU_FLAG=""
;;
"")
exit 1;


function dcgan() {
start
python main.py --dataset fake $CUDA_FLAG --mps --dry-run || error "dcgan failed"
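The hunk above defines `XPU_FLAG` but this excerpt does not show where it is consumed. As an illustration only (the runner below is hypothetical, modeled on the `dcgan` function shown above and the script's existing `start`/`error` helpers), it could be forwarded to an XPU-aware example such as mnist_rnn:

```bash
# Hypothetical sketch, not part of the PR: forwards $XPU_FLAG the same way
# dcgan() forwards $CUDA_FLAG above.
function mnist_rnn() {
  start
  python main.py --epochs 1 --dry-run $XPU_FLAG || error "mnist_rnn example failed"
}
```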
7 changes: 4 additions & 3 deletions vae/README.md
@@ -14,8 +14,9 @@ The main.py script accepts the following arguments:
optional arguments:
--batch-size input batch size for training (default: 128)
--epochs number of epochs to train (default: 10)
--no-cuda enables CUDA training
--mps enables GPU on macOS
--no-cuda disables CUDA training
--no-mps disables GPU on macOS
--no-xpu disables XPU training on Intel GPUs
--seed random seed (default: 1)
--log-interval how many batches to wait before logging training status
```
7 changes: 7 additions & 0 deletions vae/main.py
@@ -17,23 +17,30 @@
                    help='disables CUDA training')
parser.add_argument('--no-mps', action='store_true', default=False,
                    help='disables macOS GPU training')
parser.add_argument('--no-xpu', action='store_true', default=False,
                    help='disables Intel XPU training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
use_mps = not args.no_mps and torch.backends.mps.is_available()
use_xpu = not args.no_xpu and torch.xpu.is_available()

torch.manual_seed(args.seed)

if args.cuda:
    device = torch.device("cuda")
elif use_mps:
    device = torch.device("mps")
elif use_xpu:
    device = torch.device("xpu")
else:
    device = torch.device("cpu")

print(device)

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
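Finally, a hedged way to check whether a local install can exercise these new code paths at all, mirroring the probe added to run_python_examples.sh (the `hasattr` guard covers PyTorch builds that do not expose `torch.xpu`):

```bash
python -c "import torch; print(hasattr(torch, 'xpu') and torch.xpu.is_available())"
```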