
Commit 66e1021

Update mnistTest.py
1 parent 77a42e5 commit 66e1021

1 file changed: +28 -32 lines changed


mnistTest.py

Lines changed: 28 additions & 32 deletions
@@ -2,22 +2,16 @@
 from ml import Activation, Loss, WeightInitializer, BiasInitializer, Model, Optimizer
 
 print("loading data")
-trainingData = []
-with open("mnist/train.txt", "r") as file:
-    for line in file:
-        line = line.strip().split(",")
-        x = [int(pixel) / 255 for pixel in line[1:]]
-        y = [0] * 10
-        y[int(line[0])] = 1
-        trainingData.append((x, y))
-testingData = []
-with open("mnist/test.txt", "r") as file:
-    for line in file:
-        line = line.strip().split(",")
-        x = [int(pixel) / 255 for pixel in line[1:]]
-        y = [0] * 10
-        y[int(line[0])] = 1
-        testingData.append((x, y))
+trainData = np.loadtxt("mnist/train.txt", delimiter=',', dtype=int)
+xTrain = trainData[:, 1:] / 255.0
+yTrain = np.zeros((trainData.shape[0], 10), dtype=int)
+yTrain[np.arange(trainData.shape[0]), trainData[:, 0]] = 1
+trainData = np.array([(xTrain[i], yTrain[i]) for i in range(xTrain.shape[0])], dtype=object)
+testData = np.loadtxt("mnist/test.txt", delimiter=',', dtype=int)
+xTest = testData[:, 1:] / 255.0
+yTest = np.zeros((testData.shape[0], 10), dtype=int)
+yTest[np.arange(testData.shape[0]), testData[:, 0]] = 1
+testData = np.array([(xTest[i], yTest[i]) for i in range(xTest.shape[0])], dtype=object)
 
 model = Model(
     [784, 128, 10],
@@ -27,28 +21,30 @@
     BiasInitializer.Constant(0.1))
 loss = Loss.CrossEntropy()
 optimizer = Optimizer.Adam(model, 0.0003, 0.9, 0.999, 1e-8, 0.0001)
+model.initialize()
 
+print("training")
 numEpochs = 5
 batchSize = 32
-
-print("training")
-model.initialize()
 for epoch in range(numEpochs):
     print(f" epoch: {epoch + 1}/{numEpochs}")
-    np.random.shuffle(trainingData)
-    for i in range(batchSize, len(trainingData), batchSize):
-        batch = trainingData[i - batchSize : i]
-        for j in range(0, batchSize):
-            model.backPropagate(batch[j][0], batch[j][1], loss)
+    indices = np.random.permutation(len(trainData))
+    trainDataShuffled = trainData[indices]
+    for i in range(0, len(trainDataShuffled), batchSize):
+        batch = trainDataShuffled[i:i + batchSize]
+        for x, y in batch:
+            model.backPropagate(x, y, loss)
         optimizer.step()
         model.zeroGradients()
 
 print("evaluating")
-numCorrectTraining = 0
-numCorrectTesting = 0
-for i in range(len(trainingData)):
-    numCorrectTraining += int(np.argmax(model.feedForward(trainingData[i][0])) == np.argmax(trainingData[i][1]))
-for i in range(len(testingData)):
-    numCorrectTesting += int(np.argmax(model.feedForward(testingData[i][0])) == np.argmax(testingData[i][1]))
-print(f"training accuracy: {round(numCorrectTraining / len(trainingData) * 100, 2)}%")
-print(f"testing accuracy: {round(numCorrectTesting / len(testingData) * 100, 2)}%")
+numCorrectTrain = 0
+numCorrectTest = 0
+trainPredictions = np.argmax([model.feedForward(x) for x, _ in trainData], axis=1)
+trainLabels = np.argmax([y for _, y in trainData], axis=1)
+numCorrectTrain = np.sum(trainPredictions == trainLabels)
+testPredictions = np.argmax([model.feedForward(x) for x, _ in testData], axis=1)
+testLabels = np.argmax([y for _, y in testData], axis=1)
+numCorrectTest = np.sum(testPredictions == testLabels)
+print(f"training accuracy: {round(numCorrectTrain / len(trainData) * 100, 2)}%")
+print(f"testing accuracy: {round(numCorrectTest / len(testData) * 100, 2)}%")
