Pull request #199: fixed issues 61/62
New file added by this PR (an 81-line cross-validation example script):

```python
"""Simple Convolution and fully connected blocks cross validation example."""
from vulcanai import datasets
from vulcanai.models import ConvNet, DenseNet
from vulcanai.models.metrics import Metrics

import torchvision.transforms as transforms
from torch.utils.data import DataLoader

# prepare the data
normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
                                 std=[x/255.0 for x in [63.0, 62.1, 66.7]])

transform = transforms.Compose([transforms.ToTensor(),
                                normalize])

data_path = "../data"
dataset = datasets.FashionData(root=data_path,
                               train=True,
                               transform=transform,
                               download=True)

batch_size = 100

data_loader = DataLoader(dataset=dataset,
                         batch_size=batch_size,
                         shuffle=True)

# define neural network - 3 2D conv layers followed by a dense layer
conv_2D_config = {
    'conv_units': [
        dict(
            in_channels=1,
            out_channels=16,
            kernel_size=(5, 5),
            stride=2,
            dropout=0.1
        ),
        dict(
            in_channels=16,
            out_channels=32,
            kernel_size=(5, 5),
            dropout=0.1
        ),
        dict(
            in_channels=32,
            out_channels=64,
            kernel_size=(5, 5),
            pool_size=2,
            dropout=0.1
        )
    ],
}

dense_config = {
    'dense_units': [100, 50],
    'dropout': 0.5,  # Single value or List
}

conv_2D = ConvNet(
    name='conv_2D',
    in_dim=(1, 28, 28),
    config=conv_2D_config
)

dense_model = DenseNet(
    name='dense_model',
    input_networks=conv_2D,
    config=dense_config,
    num_classes=10,
    early_stopping="best_validation_error",
    early_stopping_patience=2
)

# cross-validate on 5 folds, training each fold for 2 epochs
m = Metrics()

m.cross_validate(dense_model, data_loader, 5, 2)
```
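For readers new to the library, the final call requests 5-fold cross-validation with 2 training epochs per fold. The sketch below illustrates the general k-fold mechanics only; it is hypothetical, not vulcanai's actual implementation (the `TensorDataset` stand-in and the fold loop are assumptions for illustration):

```python
import numpy as np
import torch
from torch.utils.data import DataLoader, Subset, TensorDataset

def kfold_indices(n, k, seed=0):
    """Shuffle the indices 0..n-1 and split them into k roughly equal folds."""
    rng = np.random.default_rng(seed)
    return np.array_split(rng.permutation(n), k)

# Toy dataset with the same sample shape as the example, so the sketch runs end to end.
data = TensorDataset(torch.randn(100, 1, 28, 28), torch.randint(0, 10, (100,)))
folds = kfold_indices(len(data), k=5)

for i, val_idx in enumerate(folds):
    # Hold out fold i for validation; train on the remaining k-1 folds.
    train_idx = np.concatenate([f for j, f in enumerate(folds) if j != i])
    train_loader = DataLoader(Subset(data, train_idx.tolist()),
                              batch_size=10, shuffle=True)
    val_loader = DataLoader(Subset(data, val_idx.tolist()), batch_size=10)
    # A real implementation would build a fresh model here, train it for the
    # requested number of epochs on train_loader, score it on val_loader,
    # and finally average the k per-fold scores.
    print("fold", i, "train size:", len(train_idx), "val size:", len(val_idx))
```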
Changes to the training module's `fit` loop:

```diff
@@ -28,6 +28,7 @@
 sns.set(style='dark')
 logger = logging.getLogger(__name__)
+logger.addHandler(logging.StreamHandler())

 # Because pytorch causes a bunch of unresolved references
@@ -651,9 +652,11 @@ def fit(self, train_loader, val_loader, epochs,
         else:
             save_path = save_path + '/' + self.name + '_'
         save_path = get_save_path(save_path, vis_type='train')
-        iterator = trange(epochs, desc='Epoch: ')
+        # iterator = trange(epochs, desc='Epoch: ')

-        for epoch in iterator:
+        for epoch in range(epochs):
+
+            logger.info('\n -------- Epoch: {} --------\n'.format(epoch))
```
Member: Why have this additional epoch log if the tqdm writer also writes the current epoch?

Contributor (author): Because I don't use tqdm at the epoch level anymore.

Contributor (author): The problem was the overlapping progress bars; that's why I removed the epoch-level one.

Member: Please see my other comment about this. I don't think the solution here is to remove the progress bar altogether; unless I'm missing something, we just need to clean up the nested progress bar implementation.
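A side note on the first hunk above: a module-level logger with no handler of its own delegates to the root logging configuration, so the per-epoch `logger.info` lines may never reach the console; attaching a `StreamHandler` addresses that, which is presumably why the line was added. A minimal standalone demo (the explicit `setLevel` call is an assumption added for completeness, since the effective level may otherwise stay at the default WARNING):

```python
import logging

logger = logging.getLogger("demo")
logger.setLevel(logging.INFO)               # without this, INFO records can be
                                            # filtered out at the WARNING default
logger.addHandler(logging.StreamHandler())  # send records to stderr
logger.info("now visible on the console")
```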
The diff continues:

```diff
             train_loss, train_acc = self._train_epoch(train_loader,
                                                       retain_graph)
@@ -687,7 +690,7 @@ def fit(self, train_loader, val_loader, epochs,
                     self.__dict__.update(self.load_model(
                         early_stopping.save_path).__dict__)
                     # for tqdm
-                    iterator.close()
+                    # iterator.close()
                     break

             # reset from None so that a distinction can be made
```
```diff
@@ -697,15 +700,26 @@ def fit(self, train_loader, val_loader, epochs,
             if not valid_acc:
                 valid_acc = np.nan

-            tqdm.write(
-                "\n Epoch {}:\n"
-                "Train Loss: {:.6f} | Val Loss: {:.6f} |"
-                "Train Acc: {:.4f} | Val Acc: {:.4f}".format(
-                    self.epoch,
-                    train_loss,
-                    valid_loss,
-                    train_acc,
-                    valid_acc))
+            if epoch % valid_interv == 0:
+                tqdm.write(
+                    "\nEpoch {} Summary:\n"
+                    "Train Loss: {:.6f} | Val Loss: {:.6f} |"
+                    "Train Acc: {:.4f} | Val Acc: {:.4f} \n".format(
+                        self.epoch,
+                        train_loss,
+                        valid_loss,
+                        train_acc,
+                        valid_acc))
+            else:
+                tqdm.write(
+                    "\nEpoch {} Summary:\n"
+                    "Train Loss: {:.6f} | Train Acc: {:.4f} \n".format(
+                        self.epoch,
+                        train_loss,
+                        train_acc))

             self.record['epoch'].append(self.epoch)
             self.record['train_error'].append(train_loss)
```
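Worth noting for anyone reading this hunk: `tqdm.write` is used instead of `print` because it emits a line above any active progress bar rather than visually corrupting it. A minimal illustrative demo (the loop body is a stand-in, not the project's code):

```python
import time
from tqdm import tqdm

for step in tqdm(range(50), desc='Training'):
    time.sleep(0.02)  # stand-in for a training step
    if step % 10 == 0:
        # Prints cleanly above the bar instead of breaking it mid-line.
        tqdm.write("step {}: summary goes here".format(step))
```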
```diff
@@ -723,8 +737,8 @@ def fit(self, train_loader, val_loader, epochs,
         except KeyboardInterrupt:
             logger.warning(
-                "\n\n**********KeyboardInterrupt: "
-                "Training stopped prematurely.**********\n\n")
+                "\n\n********** KeyboardInterrupt: "
+                "Training stopped prematurely. **********\n\n")

     def _train_epoch(self, train_loader, retain_graph):
         """
```
Member: Doesn't this remove the progress bar at the epoch level?

Contributor (author): The problem was the overlapping progress bars; that's why I removed the outer one.

Member: But you still need to print the epoch progress bar. Nested progress bars are what we are going for; they just need to be cleaned up in the implementation.
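For reference, tqdm does support the nested bars the reviewer is asking for via its `position` argument. A minimal sketch of that pattern, with stand-in sizes and a sleep in place of a real training step (an illustration of the suggested direction, not the project's implementation):

```python
import time
from tqdm import trange, tqdm

epochs, batches = 3, 20  # stand-in sizes for illustration

# The outer bar tracks epochs and the inner bar tracks batches. `position`
# pins each bar to its own terminal line, and `leave=False` clears the inner
# bar at the end of each epoch, so the two bars never overlap.
for epoch in trange(epochs, desc='Epoch', position=0):
    for batch in tqdm(range(batches), desc='Batch', position=1, leave=False):
        time.sleep(0.01)  # stand-in for a training step
    tqdm.write("Epoch {} finished".format(epoch))  # logs print above both bars
```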