# train.py
# Training of a neural network with 5 layers in which only sum and product
# operations are allowed (the only non-linearity is element-wise squaring,
# x*x; the sigmoid is applied solely to form the training loss, not inside
# the network). The trained parameters are saved to the ./nn_data/ folder.
#
# NOTE: written against TensorFlow 1.x APIs (tf.placeholder, tf.layers,
# tf.contrib); it will not run unmodified on TensorFlow 2.x.
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

# Hyper-parameters.
learning_rate = 0.001
training_epochs = 50
batch_size = 100
display_step = 2  # print the running cost every `display_step` epochs

# Graph inputs.
x = tf.placeholder("float", [None, 784])  # mnist data image of shape 28*28=784
y = tf.placeholder("float", [None, 10])   # 0-9 digits recognition => 10 classes

# Pad each 28x28 image with one zero row on top and one zero column on the
# left, giving 29x29, so the 5x5 / stride-2 VALID convolution below produces
# an exact 13x13 feature map.
paddings = tf.constant([[0, 0], [1, 0], [1, 0]])
input_layer = tf.reshape(x, [-1, 28, 28])
input_layer = tf.pad(input_layer, paddings, "CONSTANT")
input_layer = tf.reshape(input_layer, [-1, 29, 29, 1])

# Layer 1: purely linear convolution (activation=None — only sums/products).
conv = tf.layers.conv2d(
    inputs=input_layer,
    filters=5,
    kernel_size=[5, 5],
    strides=[2, 2],
    padding="valid",
    activation=None,
    name='convolution')

# Layers 2-5: square -> dense(100) -> square -> dense(10).
flat = tf.contrib.layers.flatten(conv)
square1 = flat * flat  # element-wise squaring stands in for an activation
pool = tf.layers.dense(square1, units=100, name='dense1')
square2 = pool * pool
output = tf.layers.dense(square2, units=10, name='dense2')
model = tf.sigmoid(output)  # only used to build the loss below

# Sum-of-squared-errors loss against the one-hot labels, minimized with SGD.
loss = tf.reduce_sum((y - model) * (y - model))
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

saver = tf.train.Saver()
init = tf.global_variables_initializer()
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        avg_cost = 0.
        training_iterations = int(mnist.train.num_examples / batch_size)
        for iteration in range(training_iterations):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train, feed_dict={x: batch_xs, y: batch_ys})
            # Accumulate the running average of the post-update batch loss.
            avg_cost += sess.run(loss, feed_dict={x: batch_xs, y: batch_ys}) / training_iterations
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
    saved_path = saver.save(sess, './nn_data/net', global_step=training_epochs)
    print("Training completed!")

    # Evaluate accuracy both with and without the final sigmoid. Sigmoid is
    # monotone, so argmax — and hence accuracy — should agree between the two.
    predictions = tf.equal(tf.argmax(model, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(predictions, "float"))
    no_sig_predictions = tf.equal(tf.argmax(output, 1), tf.argmax(y, 1))
    no_sig_accuracy = tf.reduce_mean(tf.cast(no_sig_predictions, "float"))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
    print("Accuracy without sigmoid:", no_sig_accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))