
Commit 0fa67e8

Adding more resources
1 parent 2994972 commit 0fa67e8

16 files changed (+356, −12 lines)

.DS_Store

6 KB
Binary file not shown.

02_tf_basics.py

Lines changed: 0 additions & 10 deletions
This file was deleted.
File renamed without changes.
File renamed without changes.

linear_regression.py

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
from __future__ import print_function

import tensorflow as tf

x_data = [1, 2, 3]
y_data = [2, 4, 6]

# Model parameters, initialized uniformly at random in [-1, 1]
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# Linear model
hypothesis = W * X + b

# Mean squared error
cost = tf.reduce_mean(tf.square(hypothesis - Y))

# a is the learning rate
a = tf.Variable(0.1)
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for step in range(2001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})
    if step % 20 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W), sess.run(b))

# Predict for unseen inputs
print(sess.run(hypothesis, feed_dict={X: 5}))
print(sess.run(hypothesis, feed_dict={X: 2.5}))
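
Since y_data is exactly 2 * x_data, training should converge to W ≈ 2 and b ≈ 0. One way to confirm this outside TensorFlow is the closed-form least-squares solution; a minimal NumPy sketch, independent of the script above (names are illustrative):

import numpy as np

x = np.array([1., 2., 3.])
y = np.array([2., 4., 6.])

# Design matrix [x, 1] so the solution vector is [W, b]
A = np.stack([x, np.ones_like(x)], axis=1)
Wb = np.linalg.lstsq(A, y, rcond=None)[0]
print(Wb)  # -> approximately [2., 0.]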

logistic_regression.py

Lines changed: 53 additions & 0 deletions
@@ -0,0 +1,53 @@
from __future__ import print_function

import os
import numpy as np
import tensorflow as tf

from matplotlib import pyplot as plt

filepath = os.path.abspath(__file__)
PWDPATH = os.path.dirname(filepath)
DATAPATH = PWDPATH + '/logistic_train.txt'

# Columns of logistic_train.txt are x0 x1 x2 y; unpack=True transposes,
# so rows 0..-2 are the features and the last row is the label.
xy = np.loadtxt(DATAPATH, unpack=True, dtype='float32')
x_data = xy[0:-1]
y_data = xy[-1]

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

W = tf.Variable(tf.random_uniform([1, len(x_data)], -1.0, 1.0))

# Sigmoid hypothesis: 1 / (1 + exp(-W.X))
h = tf.matmul(W, X)
hypothesis = tf.div(1., 1. + tf.exp(-h))

# Binary cross-entropy cost
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))

learning_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

init = tf.global_variables_initializer()

W_val = []
cost_val = []

sess = tf.Session()
sess.run(init)

for step in range(2001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})

    W_val.append(sess.run(W)[0])
    cost_val.append(sess.run(cost, feed_dict={X: x_data, Y: y_data}))

    if step % 20 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W)[0])

# One curve per weight component
fig, ax = plt.subplots(1, 1)
ax.set_ylim(0, 10)
ax.plot(W_val, cost_val)
plt.ylabel('cost')
plt.xlabel('W')
plt.show()
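
What the graph computes is the standard sigmoid hypothesis and binary cross-entropy. A minimal NumPy sketch of the same quantities, using the data above; the weights w are hypothetical, chosen only to illustrate, not taken from a training run:

import numpy as np

def sigmoid(z):
    return 1. / (1. + np.exp(-z))

def cross_entropy(w, x, y):
    # x: features of shape (3, n); y: labels of shape (n,)
    p = sigmoid(w.dot(x))  # predicted P(y = 1)
    return -np.mean(y * np.log(p) + (1 - y) * np.log(1 - p))

w = np.array([-5., 0.5, 1.0])  # hypothetical weights, for illustration
x = np.array([[1, 1, 1, 1, 1, 1],
              [2, 3, 3, 5, 7, 2],
              [1, 2, 4, 5, 5, 5]], dtype=float)
y = np.array([0, 0, 0, 1, 1, 1], dtype=float)
print(cross_entropy(w, x, y))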

logistic_train.txt

Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
#x0 x1 x2 y
1 2 1 0
1 3 2 0
1 3 4 0
1 5 5 1
1 7 5 1
1 2 5 1
Lines changed: 80 additions & 0 deletions
@@ -0,0 +1,80 @@
from pybrain.datasets import ClassificationDataSet
from pybrain.utilities import percentError
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer

from scipy import diag
from numpy.random import multivariate_normal

# What is the multivariate normal distribution?

# In probability theory and statistics, the multivariate normal (or
# multivariate Gaussian) distribution is a generalization of the
# one-dimensional (univariate) normal distribution to higher dimensions.

# It is often used to describe, at least approximately, any set of
# (possibly) correlated real-valued random variables, each of which
# clusters around a mean value.

# https://en.wikipedia.org/wiki/Multivariate_normal_distribution

# METHOD: Classification
# Classify any random point into one of three classes.
alldata = ClassificationDataSet(2, 1, nb_classes=3)

# DATASET: multivariate Gaussian distribution
# Produce a set of 2D points belonging to three different classes.

# Assumed means of the three classes
means = [(-1, 0), (2, 4), (3, 1)]
# Assumed (diagonal) covariances of the three classes
cov = [diag([1, 1]), diag([0.5, 1.2]), diag([1.5, 0.7])]

# Draw 400 points per class (1200 samples in total).
for n in xrange(400):
    for klass in range(3):
        point = multivariate_normal(means[klass], cov[klass])
        alldata.addSample(point, [klass])

# Randomly split the dataset into 75% training and 25% test data.
tstdata, trndata = alldata.splitWithProportion(0.25)

# For neural network classification, it is highly advisable to encode
# classes with one output neuron per class. Note that this operation
# duplicates the original targets and stores them in an (integer)
# field named 'class'.
trndata._convertToOneOfMany()
tstdata._convertToOneOfMany()

# Explore trndata and tstdata
print "Number of training patterns: ", len(trndata)
print "Input and output dimensions: ", trndata.indim, trndata.outdim
print "First sample (input, target, class):"
print trndata['input'][0], trndata['target'][0], trndata['class'][0]

# Build a feed-forward network with 5 hidden units. The input and output
# layer sizes must match the dataset's input and target dimensions.
# Additional hidden layers can be added by inserting more layer sizes.
fnn = buildNetwork(trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer)

# Set up a trainer that takes the network and training dataset as input.
# We are using a BackpropTrainer here.
trainer = BackpropTrainer(fnn, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)

# Run the training iterations.
for i in range(20):
    trainer.trainEpochs(5)

# Evaluate the network on the training and test data.
trnresult = percentError(trainer.testOnClassData(), trndata['class'])
tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])

print "epoch: %4d" % trainer.totalepochs, \
      "train error: %5.2f%%" % trnresult, \
      "test error: %5.2f%%" % tstresult

# To classify a new point, activate the network and take the argmax:
# fnn.activate(point).argmax()
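
_convertToOneOfMany encodes class k as a one-hot target with one output neuron per class. A minimal NumPy sketch of that encoding (illustrative only, not pybrain's actual implementation):

import numpy as np

def one_of_many(classes, nb_classes):
    # Integer labels -> one-hot rows, one output neuron per class
    targets = np.zeros((len(classes), nb_classes))
    targets[np.arange(len(classes)), classes] = 1
    return targets

print(one_of_many([0, 2, 1], 3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]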

softmax_classification.py

Lines changed: 38 additions & 0 deletions
@@ -0,0 +1,38 @@
from __future__ import print_function

import os

import numpy as np
import tensorflow as tf

filepath = os.path.abspath(__file__)
PWDPATH = os.path.dirname(filepath)
DATAPATH = PWDPATH + '/softmax_train.txt'

# Columns of softmax_train.txt are x0 x1 x2 plus a one-hot label [A B C].
xy = np.loadtxt(DATAPATH, unpack=True, dtype='float32')
x_data = np.transpose(xy[0:3])
y_data = np.transpose(xy[3:])

X = tf.placeholder("float", [None, 3])
Y = tf.placeholder("float", [None, 3])

W = tf.Variable(tf.zeros([3, 3]))

# Matrix shapes: X=[8, 3], W=[3, 3] -> hypothesis=[8, 3]
hypothesis = tf.nn.softmax(tf.matmul(X, W))

learning_rate = 0.001

# Cross-entropy over the one-hot targets
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), reduction_indices=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    for step in range(2001):
        sess.run(optimizer, feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))
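
The softmax itself is easy to state outside the graph. A minimal NumPy sketch of the per-row computation tf.nn.softmax performs (subtracting the row max is a standard numerical-stability trick and does not change the result):

import numpy as np

def softmax(logits):
    # Row-wise softmax; shifting by the max keeps exp() from overflowing
    z = logits - logits.max(axis=1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

logits = np.array([[1.0, 2.0, 3.0]])
print(softmax(logits))  # -> [[0.09003057 0.24472847 0.66524096]]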

softmax_train.txt

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
# x0 x1 x2 y[A B C]
1 2 1 0 0 1
1 3 2 0 0 1
1 3 4 0 0 1
1 5 5 0 1 0
1 7 5 0 1 0
1 2 5 0 1 0
1 6 6 1 0 0
1 7 7 1 0 0

tf_basics/basic_operations.py

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
from __future__ import print_function

import tensorflow as tf

# Start tf session
sess = tf.Session()

a = tf.constant(2)
b = tf.constant(3)

c = a + b

# Printing a tensor shows the operation, not its value:
# in TensorFlow, everything is an operation in the graph.
print(a)
print(b)
print(c)

print(a + b)

# Print the result of running each operation
print(sess.run(a))
print(sess.run(b))
print(sess.run(c))
print(sess.run(a + b))

01_basics.py renamed to tf_basics/gradient_loss_and_weight.py

Lines changed: 1 addition & 2 deletions
@@ -32,5 +32,4 @@
 
 weight = weight + (learning_rate * gradient)
 
-print 'Weight: ', weight
-print ''
+print 'Weight: ', weight, '\n'

tf_basics/hello_world.py

Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
from __future__ import print_function

import tensorflow as tf

hello = tf.constant('Hello, TensorFlow!')

# Start tf session
sess = tf.Session()

print(sess.run(hello))

tf_basics/using_placeholders.py

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
from __future__ import print_function

import tensorflow as tf

a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)

add = tf.add(a, b)
mul = tf.multiply(a, b)

# Same op? a + b builds the same kind of Add op as tf.add(a, b),
# though each expression creates its own node in the graph.
print(add)
print(a + b)
print(mul)
print(a * b)

# Launch the default graph
with tf.Session() as sess:
    print(sess.run(add, feed_dict={a: 2, b: 3}))

    # It works with a pre-built feed dict, too
    feed = {a: 3, b: 5}
    print(sess.run(mul, feed_dict=feed))
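
To answer the "Same op?" question directly, each tensor's producing op can be inspected. A short sketch (TF 1.x; the exact op type string can vary between versions, e.g. 'Add' vs. 'AddV2'):

import tensorflow as tf

a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)

print(tf.add(a, b).op.type)  # e.g. 'Add'
print((a + b).op.type)       # same kind of op...
print(tf.add(a, b).name == (a + b).name)  # ...but False: each call adds a new node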
Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
from __future__ import print_function

import tensorflow as tf

x_data = [1, 2, 3]
y_data = [2, 4, 6]

W = tf.Variable(tf.random_uniform([1], -10.0, 10.0))

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

hypothesis = W * X

cost = tf.reduce_mean(tf.square(hypothesis - Y))

# Manual gradient descent: d(cost)/dW = 2 * mean((W*X - Y) * X);
# the constant factor 2 is absorbed into the learning rate.
lr = 0.1
descent = W - tf.multiply(lr, tf.reduce_mean(tf.multiply((tf.multiply(W, X) - Y), X)))
train = W.assign(descent)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for step in range(2001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})
    if step % 20 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))

print(sess.run(hypothesis, feed_dict={X: 5}))
print(sess.run(hypothesis, feed_dict={X: 2.5}))
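
The assign line implements the hand-derived gradient update. For reference, differentiating the cost gives (in LaTeX, matching the code's symbols):

\mathrm{cost}(W) = \frac{1}{n}\sum_{i=1}^{n} (W x_i - y_i)^2,
\qquad
\frac{\partial\,\mathrm{cost}}{\partial W} = \frac{2}{n}\sum_{i=1}^{n} (W x_i - y_i)\, x_i

so W := W - lr * mean((W*X - Y) * X) is plain gradient descent with the factor 2 folded into lr.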
Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
from __future__ import print_function

import tensorflow as tf
from matplotlib import pyplot as plt

# Graph input
X = [1., 2., 3.]
Y = [2., 4., 6.]
n_samples = len(X)

# Model weight (fed in, not trained: we sweep it to trace the cost curve)
W = tf.placeholder(tf.float32)

# Construct a linear model
hypothesis = tf.multiply(X, W)

# Cost function: mean squared error,
#   cost(W) = (1 / n_samples) * sum_i (hypothesis_i - y_i)^2
cost = tf.reduce_sum(tf.pow(hypothesis - Y, 2)) / n_samples

init = tf.global_variables_initializer()

# For the graph
W_val = []
cost_val = []

# Launch the graph
sess = tf.Session()
sess.run(init)

# Sweep W from -3.0 to 4.9 and record the cost at each value
for i in range(-30, 50):
    print(i * 0.1, sess.run(cost, feed_dict={W: i * 0.1}))
    W_val.append(i * 0.1)
    cost_val.append(sess.run(cost, feed_dict={W: i * 0.1}))

plt.plot(W_val, cost_val, 'ro')
plt.ylabel('cost')
plt.xlabel('W')
plt.show()
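
Because Y is exactly 2 * X here, the plotted curve is the parabola cost(W) = (W - 2)^2 * mean(x^2), with its minimum at W = 2. A quick plain-Python check (illustrative only):

xs = [1., 2., 3.]

def cost(w):
    # Same mean-squared-error cost as the graph above
    return sum((w * x - 2. * x) ** 2 for x in xs) / len(xs)

print(cost(2.0))              # 0.0 at the minimum
print(cost(1.0), 14.0 / 3.0)  # both 4.666...: (1 - 2)^2 * mean(x^2)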
