# coding: utf-8
# In[2]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
# In[3]:
url = 'https://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
data_root = '.' # Change me to store data elsewhere
def download_progress_hook(count, blockSize, totalSize):
    """A hook to report the progress of a download. This is mostly intended for users with
    slow internet connections. Reports every 5% change in download progress.
    """
    global last_percent_reported
    percent = int(count * blockSize * 100 / totalSize)

    if last_percent_reported != percent:
        if percent % 5 == 0:
            sys.stdout.write("%s%%" % percent)
            sys.stdout.flush()
        else:
            sys.stdout.write(".")
            sys.stdout.flush()

        last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
    """Download a file if not present, and make sure it's the right size."""
    dest_filename = os.path.join(data_root, filename)
    if force or not os.path.exists(dest_filename):
        print('Attempting to download:', filename)
        filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)
        print('\nDownload Complete!')
    statinfo = os.stat(dest_filename)
    if statinfo.st_size == expected_bytes:
        print('Found and verified', dest_filename)
    else:
        raise Exception(
            'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
    return dest_filename

train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
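# The size check above is a weak integrity test. An optional, stronger check
# is to hash the downloaded archive. This is only a sketch: no reference
# checksum is compared here, the digest is just printed for manual inspection.
import hashlib
sha = hashlib.sha256()
with open(train_filename, 'rb') as f:
    for chunk in iter(lambda: f.read(1 << 20), b''):  # read in 1 MB chunks
        sha.update(chunk)
print('SHA-256 of', train_filename, ':', sha.hexdigest())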
# In[4]:
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
    root = os.path.splitext(os.path.splitext(filename)[0])[0]  # remove .tar.gz
    if os.path.isdir(root) and not force:
        # You may override by setting force=True.
        print('%s already present - Skipping extraction of %s.' % (root, filename))
    else:
        print('Extracting data for %s. This may take a while. Please wait.' % root)
        tar = tarfile.open(filename)
        sys.stdout.flush()
        tar.extractall(data_root)
        tar.close()
    data_folders = [
        os.path.join(root, d) for d in sorted(os.listdir(root))
        if os.path.isdir(os.path.join(root, d))]
    if len(data_folders) != num_classes:
        raise Exception(
            'Expected %d folders, one per class. Found %d instead.' % (
                num_classes, len(data_folders)))
    print(data_folders)
    return data_folders

train_folders = maybe_extract(train_filename)  # [./notMNIST_large/A, ./notMNIST_large/B, ..., ./notMNIST_large/J]
test_folders = maybe_extract(test_filename)
# In[5]:
# Display one sample image from the extracted data (the filename is a
# base64-encoded font name from the notMNIST archive).
name = os.path.join(train_folders[0], 'bGFDYXJ0b29uZXJpZS50dGY=.png')
print(name)
Image(filename=name)
# In[7]:
image_size = 28  # Pixel width and height.
pixel_depth = 255.0  # Number of levels per pixel.

def load_letter(folder, min_num_images):
    """Load the data for a single letter label."""
    image_files = os.listdir(folder)
    dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
    print(folder)
    num_images = 0
    for image in image_files:
        image_file = os.path.join(folder, image)  # e.g. ./notMNIST_large/A/MDEtMDEtMDAudHRm.png
        try:
            # Normalize pixels to roughly [-0.5, 0.5]. This doesn't change the
            # content of the image, but it makes it much easier for the
            # optimization to proceed numerically.
            image_data = (ndimage.imread(image_file).astype(float) -
                          pixel_depth / 2) / pixel_depth
            if image_data.shape != (image_size, image_size):
                raise Exception('Unexpected image shape: %s' % str(image_data.shape))
            dataset[num_images, :, :] = image_data
            num_images = num_images + 1
        except IOError as e:
            print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')

    dataset = dataset[0:num_images, :, :]
    if num_images < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d' %
                        (num_images, min_num_images))

    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset
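# A minimal sanity check of the normalization above (toy values, not from the
# dataset): raw pixels in [0, 255] map to [-0.5, 0.5], with mid-gray 127.5
# mapping to exactly 0.
raw = np.array([0.0, 127.5, 255.0])
print((raw - pixel_depth / 2) / pixel_depth)  # -> [-0.5  0.   0.5]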
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
    dataset_names = []
    for folder in data_folders:
        set_filename = folder + '.pickle'  # e.g. ./notMNIST_large/A.pickle
        dataset_names.append(set_filename)
        if os.path.exists(set_filename) and not force:
            # You may override by setting force=True.
            print('%s already present - Skipping pickling.' % set_filename)
        else:
            print('Pickling %s.' % set_filename)
            dataset = load_letter(folder, min_num_images_per_class)
            try:
                with open(set_filename, 'wb') as f:
                    pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
            except Exception as e:
                print('Unable to save data to', set_filename, ':', e)
    return dataset_names

train_datasets = maybe_pickle(train_folders, 45000)  # [./notMNIST_large/A.pickle, ./notMNIST_large/B.pickle, ...]
test_datasets = maybe_pickle(test_folders, 1800)
# In[36]:
# Problem 2: load one image back from each pickled class and display it, to
# verify that the data still looks good after pickling.
def maybe_unpickle(pickle_datasets):
    for x in range(len(pickle_datasets)):
        try:
            with open(pickle_datasets[x], 'rb') as f:
                # The protocol version used is detected automatically, so we do not
                # have to specify it.
                data = pickle.load(f)
            image_data = data[0, :, :]
            plt.imshow(image_data)
            plt.show()
        except Exception as e:
            print('Unable to load data:', e)

maybe_unpickle(train_datasets)
# In[9]:
# Problem 3: verify that the data is balanced across classes by printing the
# shape of each per-class array.
def verify_data(pickle_datasets):
    for datasets in pickle_datasets:
        try:
            with open(datasets, 'rb') as f:
                data = pickle.load(f)
            print(data.shape)
        except Exception as e:
            print('Unable to load data:', e)

verify_data(train_datasets)
# In[10]:
def make_arrays(nb_rows, img_size):
    if nb_rows:
        dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
        labels = np.ndarray(nb_rows, dtype=np.int32)
    else:
        dataset, labels = None, None
    return dataset, labels

def merge_datasets(pickle_files, train_size, valid_size=0):
    num_classes = len(pickle_files)  # 10: [./notMNIST_large/A.pickle, ./notMNIST_large/B.pickle, ...]
    valid_dataset, valid_labels = make_arrays(valid_size, image_size)
    # valid_dataset: ndarray(10000, 28, 28), valid_labels: ndarray(10000)
    train_dataset, train_labels = make_arrays(train_size, image_size)
    # train_dataset: ndarray(200000, 28, 28), train_labels: ndarray(200000)
    vsize_per_class = valid_size // num_classes
    tsize_per_class = train_size // num_classes
    print('Validation size per class:', vsize_per_class)

    start_v, start_t = 0, 0
    end_v, end_t = vsize_per_class, tsize_per_class
    end_l = vsize_per_class + tsize_per_class
    for label, pickle_file in enumerate(pickle_files):  # [(0, './notMNIST_large/A.pickle'), (1, './notMNIST_large/B.pickle'), ...]
        try:
            with open(pickle_file, 'rb') as f:
                letter_set = pickle.load(f)  # e.g. (52912, 28, 28) -> 52912 samples of the letter A
                # Let's shuffle the letters to have random validation and training sets.
                np.random.shuffle(letter_set)
                if valid_dataset is not None:
                    valid_letter = letter_set[:vsize_per_class, :, :]
                    valid_dataset[start_v:end_v, :, :] = valid_letter
                    valid_labels[start_v:end_v] = label
                    start_v += vsize_per_class
                    end_v += vsize_per_class

                train_letter = letter_set[vsize_per_class:end_l, :, :]
                train_dataset[start_t:end_t, :, :] = train_letter
                train_labels[start_t:end_t] = label
                start_t += tsize_per_class
                end_t += tsize_per_class
        except Exception as e:
            print('Unable to process data from', pickle_file, ':', e)
            raise

    return valid_dataset, valid_labels, train_dataset, train_labels

train_size = 200000
valid_size = 10000
test_size = 10000

valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
    train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)

print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
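# A quick balance check on the merge above (a sketch, assuming it completed):
# each of the 10 labels should appear train_size // num_classes times in the
# training set and valid_size // num_classes times in the validation set.
print('Training label counts:', np.bincount(train_labels))
print('Validation label counts:', np.bincount(valid_labels))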
# In[11]:
pickle_file = os.path.join(data_root, 'notMNIST.pickle')

try:
    f = open(pickle_file, 'wb')
    save = {
        'train_dataset': train_dataset,
        'train_labels': train_labels,
        'valid_dataset': valid_dataset,
        'valid_labels': valid_labels,
        'test_dataset': test_dataset,
        'test_labels': test_labels,
    }
    pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
    f.close()
except Exception as e:
    print('Unable to save data to', pickle_file, ':', e)
    raise
# In[184]:
def randomize(dataset, labels):
    permutation = np.random.permutation(labels.shape[0])
    shuffled_dataset = dataset[permutation, :, :]
    shuffled_labels = labels[permutation]
    return shuffled_dataset, shuffled_labels

train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
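# A minimal sketch on toy data (not the real dataset) showing why randomize
# is safe: images and labels are shuffled with the same permutation, so each
# pair stays aligned. Here image k is filled so its top-left pixel equals 4*k.
toy_data = np.arange(4 * 2 * 2).reshape(4, 2, 2).astype(np.float32)
toy_labels = np.arange(4, dtype=np.int32)
shuffled_data, shuffled_labels = randomize(toy_data, toy_labels)
assert all(shuffled_data[i, 0, 0] == shuffled_labels[i] * 4 for i in range(4))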
# In[12]:
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
# In[161]:
# Problem 5
# By construction, this dataset might contain a lot of overlapping samples,
# including training data that's also contained in the validation and test
# set! Overlap between training and test can skew the results if you expect
# to use your model in an environment where there is never an overlap, but
# it is actually OK if you expect to see training samples recur when you use
# it. Measure how much overlap there is between training, validation and
# test samples.
# Optional questions:
# - What about near duplicates between datasets? (images that are almost identical)
# - Create a sanitized validation and test set, and compare your accuracy on
#   those in subsequent assignments.
import random

def display_overlap(overlap, source_dataset, target_dataset):
    item = random.choice(list(overlap.keys()))
    imgs = np.concatenate(([source_dataset[item]], target_dataset[overlap[item][0:7]]))
    plt.suptitle(item)
    for i, img in enumerate(imgs):
        plt.subplot(2, 4, i + 1)  # up to 8 panels: the source image plus at most 7 duplicates
        plt.axis('off')
        plt.imshow(img)
    plt.show()
# In[75]:
def extract_overlap(dataset_1, dataset_2):
    overlap = {}
    for i, img_1 in enumerate(dataset_1):
        for j, img_2 in enumerate(dataset_2):
            if np.array_equal(img_1, img_2):
                if i not in overlap:
                    overlap[i] = []
                overlap[i].append(j)
    return overlap
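# extract_overlap compares every pair of images, O(len(dataset_1) *
# len(dataset_2)), which is why it is only run on a 200-image slice below.
# A sketch of a faster variant (a hypothetical helper, using the same hashing
# idea as sanitize() further down): hash every image once, then look
# duplicates up in a dict. This assumes both datasets share the float32 dtype
# and contiguous layout, so equal images hash to equal digests.
def extract_overlap_hashed(dataset_1, dataset_2):
    import hashlib
    hashes_2 = {}
    for j, img in enumerate(dataset_2):
        hashes_2.setdefault(hashlib.sha256(img).hexdigest(), []).append(j)
    overlap = {}
    for i, img in enumerate(dataset_1):
        matches = hashes_2.get(hashlib.sha256(img).hexdigest())
        if matches:
            overlap[i] = matches
    return overlap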
# In[172]:
import hashlib

def sanitize(dataset_1, dataset_2, labels_1):
    dataset_hash_1 = np.array([hashlib.sha256(img).hexdigest() for img in dataset_1])
    dataset_hash_2 = np.array([hashlib.sha256(img).hexdigest() for img in dataset_2])
    overlap = []  # list of indexes
    for i, hash1 in enumerate(dataset_hash_1):
        duplicates = np.where(dataset_hash_2 == hash1)
        if len(duplicates[0]):
            overlap.append(i)
    return np.delete(dataset_1, overlap, 0), np.delete(labels_1, overlap, None)
# In[76]:
get_ipython().magic('time overlap_test_train = extract_overlap(test_dataset[:200], train_dataset)')

# In[167]:
get_ipython().magic('time test_dataset_sanit, test_labels_sanit = sanitize(test_dataset[:200], train_dataset, test_labels[:200])')
print('Overlapping images removed:', len(test_dataset[:200]) - len(test_dataset_sanit))

# In[173]:
get_ipython().magic('time test_dataset_sanit, test_labels_sanit = sanitize(test_dataset, train_dataset, test_labels)')
print('Overlapping images removed:', len(test_dataset) - len(test_dataset_sanit))

# In[162]:
print(overlap_test_train)
# print('Number of overlaps:', len(overlap_test_train.keys()))
display_overlap(overlap_test_train, test_dataset[:200], train_dataset)
# In[202]:
# Problem 6
# Let's get an idea of what an off-the-shelf classifier can give you on this
# data. It's always good to check that there is something to learn, and that
# it's a problem that is not so trivial that a canned solution solves it.
# Train a simple model on this data using 50, 100, 1000 and 5000 training
# samples. Hint: you can use the LogisticRegression model from
# sklearn.linear_model.
# Optional question: train an off-the-shelf model on all the data!
X_test = test_dataset.reshape(test_size, -1)
y_test = test_labels

res = {}
for sample_size in [50, 100, 1000, 5000, 10000, 50000]:
    clf = LogisticRegression(solver='sag', multi_class='multinomial', n_jobs=-1)
    X_train = train_dataset[:sample_size].reshape(sample_size, -1)
    y_train = train_labels[:sample_size]
    clf.fit(X_train, y_train)
    score = clf.score(X_test, y_test)
    res[sample_size] = {'clf': clf, 'score': score}
    print(sample_size, score)
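# A short sketch visualizing the scores collected in res above: test accuracy
# should improve with more training samples and flatten out as this simple
# linear model saturates.
sizes = sorted(res.keys())
plt.plot(sizes, [res[s]['score'] for s in sizes], 'o-')
plt.xscale('log')
plt.xlabel('Training samples')
plt.ylabel('Test accuracy')
plt.show()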
# In[191]:
pretty_labels = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'J'}

def disp_sample_dataset(dataset, labels):
    items = random.sample(range(len(labels)), 8)
    for i, item in enumerate(items):
        plt.subplot(2, 4, i + 1)
        plt.axis('off')
        plt.title(pretty_labels[labels[item]])
        plt.imshow(dataset[item])
    plt.show()

# In[192]:
# Show a few test images with the labels predicted by the last classifier
# fitted above (the one trained on 50000 samples).
pred_labels = clf.predict(X_test)
disp_sample_dataset(test_dataset, pred_labels)
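# An optional follow-up sketch (assumes only scikit-learn, already imported
# above for LogisticRegression): a confusion matrix makes it easy to see which
# letters the linear model mixes up, beyond the eyeball check above.
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, pred_labels))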