diff --git a/mnist/main.py b/mnist/main.py
index 184dc4744f..c28210468a 100644
--- a/mnist/main.py
+++ b/mnist/main.py
@@ -86,6 +86,8 @@ def main():
                         help='disables CUDA training')
     parser.add_argument('--no-mps', action='store_true', default=False,
                         help='disables macOS GPU training')
+    parser.add_argument('--no-xpu', action='store_true', default=False,
+                        help='disables Intel GPU training')
     parser.add_argument('--dry-run', action='store_true', default=False,
                         help='quickly check a single pass')
     parser.add_argument('--seed', type=int, default=1, metavar='S',
@@ -97,6 +99,7 @@ def main():
     args = parser.parse_args()
     use_cuda = not args.no_cuda and torch.cuda.is_available()
     use_mps = not args.no_mps and torch.backends.mps.is_available()
+    use_xpu = not args.no_xpu and torch.xpu.is_available()
 
     torch.manual_seed(args.seed)
 
@@ -104,6 +107,8 @@ def main():
         device = torch.device("cuda")
     elif use_mps:
         device = torch.device("mps")
+    elif use_xpu:
+        device = torch.device("xpu")
     else:
         device = torch.device("cpu")
 
diff --git a/mnist_forward_forward/README.md b/mnist_forward_forward/README.md
index f6ae12e56d..2ed14733fc 100644
--- a/mnist_forward_forward/README.md
+++ b/mnist_forward_forward/README.md
@@ -17,6 +17,7 @@ optional arguments:
   --epochs EPOCHS       number of epochs to train (default: 1000)
   --lr LR               learning rate (default: 0.03)
   --no_cuda             disables CUDA training
+  --no_xpu              disables XPU training
   --no_mps              disables MPS training
   --seed SEED           random seed (default: 1)
   --save_model          For saving the current Model
diff --git a/mnist_forward_forward/main.py b/mnist_forward_forward/main.py
index a175126067..be62cfe382 100644
--- a/mnist_forward_forward/main.py
+++ b/mnist_forward_forward/main.py
@@ -104,6 +104,9 @@ def train(self, x_pos, x_neg):
     parser.add_argument(
         "--no_cuda", action="store_true", default=False, help="disables CUDA training"
     )
+    parser.add_argument(
+        "--no_xpu", action="store_true", default=False, help="disables XPU training"
+    )
     parser.add_argument(
         "--no_mps", action="store_true", default=False, help="disables MPS training"
     )
@@ -138,9 +141,12 @@ def train(self, x_pos, x_neg):
     )
     args = parser.parse_args()
     use_cuda = not args.no_cuda and torch.cuda.is_available()
+    use_xpu = not args.no_xpu and torch.xpu.is_available()
     use_mps = not args.no_mps and torch.backends.mps.is_available()
     if use_cuda:
         device = torch.device("cuda")
+    elif use_xpu:
+        device = torch.device("xpu")
     elif use_mps:
         device = torch.device("mps")
     else:
diff --git a/mnist_hogwild/README.md b/mnist_hogwild/README.md
index 5f12161d53..6d1a613510 100644
--- a/mnist_hogwild/README.md
+++ b/mnist_hogwild/README.md
@@ -22,5 +22,4 @@ optional arguments:
   --num_process         how many training processes to use (default: 2)
   --cuda                enables CUDA training
   --dry-run             quickly check a single pass
-  --save-model          For Saving the current Model
 ```
diff --git a/mnist_hogwild/main.py b/mnist_hogwild/main.py
index 6fa449233d..b969cb8689 100644
--- a/mnist_hogwild/main.py
+++ b/mnist_hogwild/main.py
@@ -60,10 +60,12 @@ def forward(self, x):
 
     use_cuda = args.cuda and torch.cuda.is_available()
     use_mps = args.mps and torch.backends.mps.is_available()
+
     if use_cuda:
         device = torch.device("cuda")
     elif use_mps:
         device = torch.device("mps")
+
     else:
         device = torch.device("cpu")
 
@@ -81,7 +83,7 @@ def forward(self, x):
         kwargs.update({'num_workers': 1,
                        'pin_memory': True,
                        })
-
+
     torch.manual_seed(args.seed)
     mp.set_start_method('spawn', force=True)
 
diff --git a/mnist_rnn/README.md b/mnist_rnn/README.md
index c879cb367f..e6cfa15cf9 100644
--- a/mnist_rnn/README.md
+++ b/mnist_rnn/README.md
@@ -8,3 +8,21 @@ pip install -r requirements.txt
 python main.py
 # CUDA_VISIBLE_DEVICES=2 python main.py  # to specify GPU id to ex. 2
 ```
+The main.py script accepts the following arguments:
+
+```bash
+optional arguments:
+  -h, --help            show this help message and exit
+  --batch_size          input batch size for training (default: 64)
+  --testing_batch_size  input batch size for testing (default: 1000)
+  --epochs EPOCHS       number of epochs to train (default: 14)
+  --lr LR               learning rate (default: 0.1)
+  --gamma               learning rate step gamma (default: 0.7)
+  --cuda                enables CUDA training
+  --xpu                 enables XPU training
+  --mps                 enables macOS GPU training
+  --seed SEED           random seed (default: 1)
+  --save_model          For saving the current Model
+  --log_interval        how many batches to wait before logging training status
+  --dry-run             quickly check a single pass
+```
\ No newline at end of file
diff --git a/mnist_rnn/main.py b/mnist_rnn/main.py
index 2fa64c00d6..9ab7c3f80d 100644
--- a/mnist_rnn/main.py
+++ b/mnist_rnn/main.py
@@ -93,6 +93,8 @@ def main():
                         help='learning rate step gamma (default: 0.7)')
     parser.add_argument('--cuda', action='store_true', default=False,
                         help='enables CUDA training')
+    parser.add_argument('--xpu', action='store_true', default=False,
+                        help='enables XPU training')
     parser.add_argument('--mps', action="store_true", default=False,
                         help="enables MPS training")
     parser.add_argument('--dry-run', action='store_true', default=False,
@@ -109,6 +111,8 @@ def main():
         device = "cuda"
     elif args.mps and not args.cuda:
         device = "mps"
+    elif args.xpu:
+        device = "xpu"
     else:
         device = "cpu"
 
@@ -117,6 +121,7 @@ def main():
     torch.manual_seed(args.seed)
 
     kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
+
     train_loader = torch.utils.data.DataLoader(
         datasets.MNIST('../data', train=True, download=True,
                        transform=transforms.Compose([
diff --git a/run_python_examples.sh b/run_python_examples.sh
index 0e06e4cfc0..7db53ccd2f 100755
--- a/run_python_examples.sh
+++ b/run_python_examples.sh
@@ -30,6 +30,22 @@ case $USE_CUDA in
     ;;
 esac
 
+USE_XPU=$(python -c "import torchvision, torch; print(torch.xpu.is_available())")
+case $USE_XPU in
+  "True")
+    echo "using xpu"
+    XPU=1
+    XPU_FLAG="--xpu"
+    ;;
+  "False")
+    echo "not using xpu"
+    XPU=0
+    XPU_FLAG=""
+    ;;
+  "")
+    exit 1;
+    ;;
+esac
 
 function dcgan() {
   start python main.py --dataset fake $CUDA_FLAG --mps --dry-run || error "dcgan failed"
diff --git a/vae/README.md b/vae/README.md
index cda6a33672..97778c5d8c 100644
--- a/vae/README.md
+++ b/vae/README.md
@@ -14,8 +14,9 @@ The main.py script accepts the following arguments:
 optional arguments:
   --batch-size          input batch size for training (default: 128)
   --epochs              number of epochs to train (default: 10)
-  --no-cuda             enables CUDA training
-  --mps                 enables GPU on macOS
+  --no-cuda             disables CUDA training
+  --no-mps              disables GPU on macOS
+  --no-xpu              disables XPU training on Intel GPUs
   --seed                random seed (default: 1)
   --log-interval        how many batches to wait before logging training status
-```
\ No newline at end of file
+```
diff --git a/vae/main.py b/vae/main.py
index d69833fbe0..1d0d5ea2a8 100644
--- a/vae/main.py
+++ b/vae/main.py
@@ -17,6 +17,8 @@
                     help='disables CUDA training')
 parser.add_argument('--no-mps', action='store_true', default=False,
                     help='disables macOS GPU training')
+parser.add_argument('--no-xpu', action='store_true', default=False,
+                    help='disables Intel XPU training')
 parser.add_argument('--seed', type=int, default=1, metavar='S',
                     help='random seed (default: 1)')
 parser.add_argument('--log-interval', type=int, default=10, metavar='N',
@@ -24,6 +26,7 @@
 args = parser.parse_args()
 args.cuda = not args.no_cuda and torch.cuda.is_available()
 use_mps = not args.no_mps and torch.backends.mps.is_available()
+use_xpu = not args.no_xpu and torch.xpu.is_available()
 
 torch.manual_seed(args.seed)
 
@@ -31,9 +34,13 @@
     device = torch.device("cuda")
 elif use_mps:
     device = torch.device("mps")
+elif use_xpu:
+    device = torch.device("xpu")
 else:
     device = torch.device("cpu")
 
+print(device)
+
 kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
 train_loader = torch.utils.data.DataLoader(
     datasets.MNIST('../data', train=True, download=True,
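
Every Python example touched above ends up with the same accelerator-selection shape: an opt-out (or opt-in) flag per backend, an availability probe, and a cuda/mps/xpu/cpu fallback chain. The sketch below is not part of the patch; it restates that pattern in one place, follows the mnist/main.py ordering (CUDA, then MPS, then XPU), and adds a `hasattr` guard on the assumption that older PyTorch builds may not expose `torch.xpu` at all.

```python
import argparse

import torch


def resolve_device(args: argparse.Namespace) -> torch.device:
    """Mirror the examples' fallback chain: CUDA, then MPS, then XPU, then CPU."""
    if not args.no_cuda and torch.cuda.is_available():
        return torch.device("cuda")
    if not args.no_mps and torch.backends.mps.is_available():
        return torch.device("mps")
    # torch.xpu ships only with Intel-GPU-enabled builds, so probe defensively.
    if not args.no_xpu and hasattr(torch, "xpu") and torch.xpu.is_available():
        return torch.device("xpu")
    return torch.device("cpu")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="device selection sketch")
    parser.add_argument("--no-cuda", action="store_true", default=False)
    parser.add_argument("--no-mps", action="store_true", default=False)
    parser.add_argument("--no-xpu", action="store_true", default=False)
    print(resolve_device(parser.parse_args()))
```

Note that mnist_forward_forward/main.py checks XPU before MPS, so the ordering above is one of two variants in the patch rather than a single canonical one.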