Commit 20c95db

Fix several minor typos in some notebooks and an example
Signed-off-by: James Raphael Tiovalen <[email protected]>
Signed-off-by: James R T <[email protected]>
1 parent 8514f60 commit 20c95db

20 files changed: +51 −51 lines changed

examples/inverse_gan_author_utils.py

+5 −5

@@ -274,7 +274,7 @@ class Dataset(object):
  """

  def __init__(self, name, data_dir=path_locations["data"]):
- """The datasaet default constructor.
+ """The dataset default constructor.

  Args:
  name: A string, name of the dataset.
@@ -959,7 +959,7 @@ def __init__(
  verbose=True,
  **args
  ):
- self.dataset_name = None # Name of the datsaet.
+ self.dataset_name = None # Name of the dataset.
  self.batch_size = 32 # Batch size for training the GAN.
  self.use_bn = True # Use batchnorm in the discriminator and generator.
  self.use_resblock = False # Use resblocks in DefenseGAN.
@@ -1045,7 +1045,7 @@ def _load_dataset(self):
  def _build(self):
  """Builds the computation graph."""

- assert (self.batch_size % self.rec_rr) == 0, "Batch size should be divisable by random restart"
+ assert (self.batch_size % self.rec_rr) == 0, "Batch size should be divisible by random restart"

  self.discriminator_training = tf.placeholder(tf.bool)
  self.encoder_training = tf.placeholder(tf.bool)
@@ -1525,7 +1525,7 @@ def __init__(self, batch_size):
  # original self.init_opt = tf.variables_initializer(var_list=[modifier] + new_vars)
  self.init_opt = tf.variables_initializer(var_list=[] + new_vars)

- print("Reconstruction module initialzied...\n")
+ print("Reconstruction module initialized...\n")

  def generate_z_extrapolated_k(self):
  x_shape = [28, 28, 1]
@@ -1894,7 +1894,7 @@ def spectral_norm(w, num_iters=1, update_collection=None):


  ##################################################################################
- # Residual Blockes
+ # Residual Blocks
  ##################################################################################

notebooks/adaptive_defence_evaluations/evaluation_12_EMPIR.ipynb

+2 −2

@@ -172,7 +172,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "# Create placegolders\n",
+ "# Create placeholders\n",
  "x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, channels))\n",
  "y = tf.placeholder(tf.float32, shape=(None, 10))\n",
  "phase = tf.placeholder(tf.bool, name=\"phase\")\n",
@@ -560,7 +560,7 @@
  }
  ],
  "source": [
- "# Get accuracy on benging test samples for each model separately\n",
+ "# Get accuracy on benign test samples for each model separately\n",
  "\n",
  "accuracy_test_benign_1 = get_accuracy(X=X_test, Y=Y_test, batch_size=batch_size, predictions=preds_prob_1)\n",
  "print('Model 1 - Accuracy on benign test samples: {0:.2f}%.'.format(accuracy_test_benign_1 * 100))\n",

notebooks/adversarial_training_mnist.ipynb

+1 −1

@@ -312,7 +312,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "# We had performed this before, starting with a randomly intialized model.\n",
+ "# We had performed this before, starting with a randomly initialized model.\n",
  "# Adversarial training takes about 80 minutes on an NVIDIA V100.\n",
  "# The resulting model is the one loaded from mnist_cnn_robust.h5 above.\n",
  "\n",

notebooks/attack_database_reconstruction.ipynb

+2 −2

@@ -29,7 +29,7 @@
  "\n",
  "In this example, we train a Gaussian Naive Bayes classifier (`model`) with the training dataset, then remove a single row from that dataset, and seek to reconstruct that row using `model`. For typical examples, this attack is successful up to machine precision.\n",
  "\n",
- "We then show that launching the same attack on a ML model trained with differential privacy guarantees provides protection for the traning dataset, and prevents learning the target row with precision."
+ "We then show that launching the same attack on a ML model trained with differential privacy guarantees provides protection for the training dataset, and prevents learning the target row with precision."
  ]
  },
  {
@@ -228,7 +228,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
- "We can mitigate against this attack by training the public ML model with differential privacy. We will use [diffprivlib](https://github.com/Trusted-AI/differential-privacy-library) to train a differentially private Guassian naive Bayes classifier. We can mitigate against any loss in accuracy of the model by choosing an `epsilon` value appropriate to our needs."
+ "We can mitigate against this attack by training the public ML model with differential privacy. We will use [diffprivlib](https://github.com/Trusted-AI/differential-privacy-library) to train a differentially private Gaussian naive Bayes classifier. We can mitigate against any loss in accuracy of the model by choosing an `epsilon` value appropriate to our needs."
  ]
  },
  {

notebooks/attack_defence_imagenet.ipynb

+1 −1

@@ -1663,7 +1663,7 @@
  }
  ],
  "source": [
- "# Initalize the SpatialSmoothing defence. \n",
+ "# Initialize the SpatialSmoothing defence. \n",
  "ss = SpatialSmoothing(window_size=3)\n",
  "\n",
  "# Apply the defence to the original input and to the adversarial sample, respectively:\n",

notebooks/attack_laser.ipynb

+2 −2

@@ -10,7 +10,7 @@
  "[paper](https://openaccess.thecvf.com/content/CVPR2021/papers/Duan_Adversarial_Laser_Beam_Effective_Physical-World_Attack_to_DNNs_in_a_CVPR_2021_paper.pdf) is based on the following idea. \n",
  "Image of the laser beam is generated and then added to the attacked image. \n",
  "Laser beam is described by four parameters: wavelength(nanometers), angle(rad), bias(px) and width(px). \n",
- "During the attack those parameters are optimised in order to achievie prediction change. \n"
+ "During the attack those parameters are optimised in order to achieve prediction change. \n"
  ]
  },
  {
@@ -89,7 +89,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
- "### Image of random laser beam based on prevoius parameters"
+ "### Image of random laser beam based on previous parameters"
  ]
  },
  {

notebooks/attack_membership_inference.ipynb

+2 −2

@@ -237,7 +237,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
- "Acheives much better results than the rule-based attack."
+ "Achieves much better results than the rule-based attack."
  ]
  },
  {
@@ -462,7 +462,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
- "For the pytorch target model we were able to acheive slightly better than random attack performance, but not as good as for the random forest model."
+ "For the pytorch target model we were able to achieve slightly better than random attack performance, but not as good as for the random forest model."
  ]
  }
  ],

notebooks/attack_membership_inference_shadow_models.ipynb

+1 −1

@@ -32,7 +32,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
- "The data is seperated, 25% will go towards and training and testing the target model, 75% of data will be used as shadow training data."
+ "The data is separated, 25% will go towards and training and testing the target model, 75% of data will be used as shadow training data."
  ]
  },
  {

notebooks/classifier_blackbox_lookup_table.ipynb

+1 −1

@@ -168,7 +168,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
- "### Create a black-box classifier based on exisiting predictions"
+ "### Create a black-box classifier based on existing predictions"
  ]
  },
  {

notebooks/classifier_gpy_gaussian_process.ipynb

+5 −5

@@ -76,7 +76,7 @@
  "#get the model\n",
  "m = GPy.models.GPClassification(X, y.reshape(-1,1), kernel=gpkern)\n",
  "m.rbf.lengthscale.fix(0.4)\n",
- "#determining the infernce method\n",
+ "#determining the inference method\n",
  "m.inference_method = GPy.inference.latent_function_inference.laplace.Laplace()\n",
  "#now train the model\n",
  "m.optimize(messages=True, optimizer='lbfgs')\n",
@@ -98,7 +98,7 @@
  "### Confidence optimized adversarial examples\n",
  "We craft adversarial examples which are optimized for confidence. We plot the initial seeds for the adversarial examples in green and the resulting adversarial examples in black, and connect the initial and final points using a straight line (which is not equivalent to the path the optimization took).\n",
  "\n",
- "We observe that some examples are not moving towards the other class, but instead seem to move randomly away from the data. This stems from the problem that the Gaussian Processes' gradients point away from the data in all directions, and might lead the attack far away from the actauly boundary."
+ "We observe that some examples are not moving towards the other class, but instead seem to move randomly away from the data. This stems from the problem that the Gaussian Processes' gradients point away from the data in all directions, and might lead the attack far away from the actual boundary."
  ]
  },
  {
@@ -143,7 +143,7 @@
  "metadata": {},
  "source": [
  "### Uncertainty optimized adversarial examples\n",
- "We can additionally optimize for uncetainty by setting unc_increase to 0.9, thereby forcing the adversarial examples to be closer to the original training data."
+ "We can additionally optimize for uncertainty by setting unc_increase to 0.9, thereby forcing the adversarial examples to be closer to the original training data."
  ]
  },
  {
@@ -188,8 +188,8 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
- "### PGD on Guassian process classification\n",
- "To conclude, we show how to compute PGD adversarial exmples on our model. We observe that as before, many attempts fail, as the model misleads the attack to take a wrong path away from the boundary. In this case, examples are classified as default: either of the two classes."
+ "### PGD on Gaussian process classification\n",
+ "To conclude, we show how to compute PGD adversarial examples on our model. We observe that as before, many attempts fail, as the model misleads the attack to take a wrong path away from the boundary. In this case, examples are classified as default: either of the two classes."
  ]
  },
  {

notebooks/classifier_scikitlearn_AdaBoostClassifier.ipynb

+1 −1

@@ -46,7 +46,7 @@
  " model = AdaBoostClassifier()\n",
  " model.fit(X=x_train, y=y_train)\n",
  "\n",
- " # Create ART classfier for scikit-learn AdaBoostClassifier\n",
+ " # Create ART classifier for scikit-learn AdaBoostClassifier\n",
  " art_classifier = SklearnClassifier(model=model)\n",
  "\n",
  " # Create ART Zeroth Order Optimization attack\n",

notebooks/classifier_scikitlearn_RandomForestClassifier.ipynb

+1 −1

@@ -46,7 +46,7 @@
  " model = RandomForestClassifier()\n",
  " model.fit(X=x_train, y=y_train)\n",
  "\n",
- " # Create ART classfier for scikit-learn RandomForestClassifier\n",
+ " # Create ART classifier for scikit-learn RandomForestClassifier\n",
  " art_classifier = SklearnClassifier(model=model)\n",
  "\n",
  " # Create ART Zeroth Order Optimization attack\n",

notebooks/classifier_scikitlearn_SVC_LinearSVC.ipynb

+1 −1

@@ -53,7 +53,7 @@
  " decision_function_shape='ovr', random_state=None)\n",
  " model.fit(X=x_train, y=y_train)\n",
  "\n",
- " # Create ART classfier for scikit-learn SVC\n",
+ " # Create ART classifier for scikit-learn SVC\n",
  " art_classifier = SklearnClassifier(model=model, clip_values=(0, 10))\n",
  "\n",
  " # Create ART attack\n",

notebooks/classifier_scikitlearn_pipeline_pca_cv_svc.ipynb

+1 −1

@@ -11,7 +11,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
- "This notebook contains an example of generating adversarial samples using a black-box attack against a scikit-learn pipeline consisting of principal component analysis (PCA) and a support vector machine classifier (SVC), but any other valid pipeline would work too. The pipeline is first optimised using grid search with cross validation. The adversarial samples are created with black-box `HopSkipJump` attack. The training data is MNIST, becasue of its intuitive visualisation, but any other dataset including tabular data would be suitable too."
+ "This notebook contains an example of generating adversarial samples using a black-box attack against a scikit-learn pipeline consisting of principal component analysis (PCA) and a support vector machine classifier (SVC), but any other valid pipeline would work too. The pipeline is first optimised using grid search with cross validation. The adversarial samples are created with black-box `HopSkipJump` attack. The training data is MNIST, because of its intuitive visualisation, but any other dataset including tabular data would be suitable too."
  ]
  },
  {

notebooks/expectation_over_transformation_classification_rotation.ipynb

+2 −2

@@ -343,7 +343,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
- "To make the adversarial example robust against rotation we apply Expectation over Transformation (EoT) sampling using ART's `art.preprocessing.expectation_over_transformation.EoTImageRotationTensorFlow` tool. The instance `eot_rotation` will draw `eot_samples` samples and roate each with a differen random angle in each evaluation of the classifier model, including predictions and loss gradient calculations."
+ "To make the adversarial example robust against rotation we apply Expectation over Transformation (EoT) sampling using ART's `art.preprocessing.expectation_over_transformation.EoTImageRotationTensorFlow` tool. The instance `eot_rotation` will draw `eot_samples` samples and rotate each with a different random angle in each evaluation of the classifier model, including predictions and loss gradient calculations."
  ]
  },
  {
@@ -410,7 +410,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
- "Applying EoT sampling of rotations to the classifier used by the attack generates adversarial examples that are robust against rotations. The plots below show that the adversarial exmaple created in the previous section remains adversarial with high confidence for a large variety of rotation angles (red bars). The evaluation was done with predictions of the ART classifier without EoT, but the same classifier model, created first above, to not add additional rotation to the evaluated input."
+ "Applying EoT sampling of rotations to the classifier used by the attack generates adversarial examples that are robust against rotations. The plots below show that the adversarial example created in the previous section remains adversarial with high confidence for a large variety of rotation angles (red bars). The evaluation was done with predictions of the ART classifier without EoT, but the same classifier model, created first above, to not add additional rotation to the evaluated input."
  ]
  },
  {

notebooks/imperceptible_attack_on_tabular_data.ipynb

+13 −13

@@ -853,7 +853,7 @@
  "output_type": "stream",
  "text": [
  "Test set size: 114\n",
- "Succes rate: 100.00%\n"
+ "Success rate: 100.00%\n"
  ]
  }
  ],
@@ -864,7 +864,7 @@
  " y_valid_cancer.apply(lambda x: np.random.choice([i for i in range(n_classes) if i != x]))\n",
  ")]\n",
  "\n",
- "# Generate adverasies\n",
+ "# Generate adversaries\n",
  "adversaries = lpf_svc.generate(x=X_valid_cancer, y=targets)\n",
  "\n",
  "# Check the success rate\n",
@@ -874,7 +874,7 @@
  "correct = (expected == predicted)\n",
  "success_rate = np.sum(correct) / correct.shape[0]\n",
  "print(\"Test set size: {}\".format(targets.shape[0]))\n",
- "print(\"Succes rate: {:.2f}%\".format(100*success_rate))"
+ "print(\"Success rate: {:.2f}%\".format(100*success_rate))"
  ]
  },
  {
@@ -1092,7 +1092,7 @@
  " correct = (expected == predicted)\n",
  " \n",
  " success_rate = np.sum(correct) / correct.shape[0]\n",
- " print(\"Succes rate: {:.2f}%\".format(100*success_rate))\n",
+ " print(\"Success rate: {:.2f}%\".format(100*success_rate))\n",
  " \n",
  " return adversaries"
  ]
@@ -1113,12 +1113,12 @@
  "name": "stdout",
  "output_type": "stream",
  "text": [
- "Succes rate: 100.00%\n"
+ "Success rate: 100.00%\n"
  ]
  }
  ],
  "source": [
- "# Wrapping classifier into appropiate ART-friendly wrapper\n",
+ "# Wrapping classifier into appropriate ART-friendly wrapper\n",
  "logistic_regression_iris_wrapper = ScikitlearnLogisticRegression(\n",
  " model = log_regression_clf_iris, \n",
  " clip_values = scaled_clip_values_iris\n",
@@ -1251,12 +1251,12 @@
  "name": "stdout",
  "output_type": "stream",
  "text": [
- "Succes rate: 100.00%\n"
+ "Success rate: 100.00%\n"
  ]
  }
  ],
  "source": [
- "# Wrapping classifier into appropiate ART-friendly wrapper\n",
+ "# Wrapping classifier into appropriate ART-friendly wrapper\n",
  "logistic_regression_cancer_wrapper = ScikitlearnLogisticRegression(\n",
  " model = log_regression_clf_cancer, \n",
  " clip_values = scaled_clip_values_cancer\n",
@@ -1382,7 +1382,7 @@
  " correct = (expected == predicted)\n",
  " \n",
  " success_rate = np.sum(correct) / correct.shape[0]\n",
- " print(\"Succes rate: {:.2f}%\".format(100*success_rate))\n",
+ " print(\"Success rate: {:.2f}%\".format(100*success_rate))\n",
  " \n",
  " return adversaries"
  ]
@@ -1403,12 +1403,12 @@
  "name": "stdout",
  "output_type": "stream",
  "text": [
- "Succes rate: 100.00%\n"
+ "Success rate: 100.00%\n"
  ]
  }
  ],
  "source": [
- "# Wrapping classifier into appropiate ART-friendly wrapper\n",
+ "# Wrapping classifier into appropriate ART-friendly wrapper\n",
  "# (in this case it is PyTorch NN classifier wrapper from ART)\n",
  "neural_network_iris_wrapper = PyTorchClassifier(\n",
  " model = nn_model_iris, \n",
@@ -1516,12 +1516,12 @@
  "name": "stdout",
  "output_type": "stream",
  "text": [
- "Succes rate: 97.37%\n"
+ "Success rate: 97.37%\n"
  ]
  }
  ],
  "source": [
- "# Wrapping classifier into appropiate ART-friendly wrapper\n",
+ "# Wrapping classifier into appropriate ART-friendly wrapper\n",
  "# (in this case it is PyTorch NN classifier wrapper from ART)\n",
  "neural_network_cancer_wrapper = PyTorchClassifier(\n",
  " model = nn_model_cancer, \n",

notebooks/label_only_membership_inference.ipynb

+5 −5

@@ -17,7 +17,7 @@
  "\n",
  "The basic intuition behind the attack is that samples used in training will be farther away from the decision boundary than non-training data.\n",
  "\n",
- "The attacker uses various means of **perturbations** to measure the amount of noise that is needed to \"change the classifier's mind\" about their prediction for a given sample. Since the ML model is more confident on training data, the attacker will need to perturb the input more to force the model to misclassify. Thus, the amount of perturbation needed will be analogue to the sample's distance from the decision boundary. Both of the below listed attacks use an adversarial perturbation techique called [**HopSkipJump**](https://arxiv.org/abs/1904.02144).\n",
+ "The attacker uses various means of **perturbations** to measure the amount of noise that is needed to \"change the classifier's mind\" about their prediction for a given sample. Since the ML model is more confident on training data, the attacker will need to perturb the input more to force the model to misclassify. Thus, the amount of perturbation needed will be analogue to the sample's distance from the decision boundary. Both of the below listed attacks use an adversarial perturbation technique called [**HopSkipJump**](https://arxiv.org/abs/1904.02144).\n",
  "\n",
  "![title](img/label_only_boundary_intuition.png)\n",
  "\n",
@@ -268,7 +268,7 @@
  "metadata": {},
  "source": [
  "<a id='infer'></a>\n",
- "### Infer membeship on evaluation data"
+ "### Infer membership on evaluation data"
  ]
  },
  {
@@ -415,7 +415,7 @@
  "metadata": {},
  "source": [
  "<a id='infer_nodata'></a>\n",
- "### Infer membeship on evaluation data"
+ "### Infer membership on evaluation data"
  ]
  },
  {
@@ -440,7 +440,7 @@
  }
  ],
  "source": [
- "pred_label_unsuperviseed = mia_label_only_unsupervised.infer(x_eval, y_eval)"
+ "pred_label_unsupervised = mia_label_only_unsupervised.infer(x_eval, y_eval)"
  ]
  },
  {
@@ -460,7 +460,7 @@
  }
  ],
  "source": [
- "print(\"Accuracy: %f\" % accuracy_score(eval_label, pred_label_unsuperviseed))"
+ "print(\"Accuracy: %f\" % accuracy_score(eval_label, pred_label_unsupervised))"
  ]
  },
  {
