#! /usr/bin/python
# -*- coding: utf8 -*-


import tensorflow as tf
import tensorlayer as tl
slim = tf.contrib.slim
from tensorflow.contrib.slim.python.slim.nets.alexnet import alexnet_v2
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base, inception_v3, inception_v3_arg_scope
# from tensorflow.contrib.slim.python.slim.nets.resnet_v2 import resnet_v2_152
# from tensorflow.contrib.slim.python.slim.nets.vgg import vgg_16
import skimage
import skimage.io
import skimage.transform
import time
from data.imagenet_classes import *
import numpy as np
"""
You will learn:
1. What is TF-Slim?
2. How to combine TensorLayer and TF-Slim.

Introduction to Slim : https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim
Slim Pre-trained Models : https://github.com/tensorflow/models/tree/master/slim

With the help of SlimNetsLayer, any Slim model can be merged into TensorLayer.
All models in the following link that end with `return net, end_points` are available:
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim/python/slim/nets


Bugs
-----
tf.variable_scope :
    https://groups.google.com/a/tensorflow.org/forum/#!topic/discuss/RoxrU3UnbFA
load inception_v3 for prediction :
    http://stackoverflow.com/questions/39357454/restore-checkpoint-in-tensorflow-tensor-name-not-found
"""
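
# The docstring above says any Slim net that ends with `return net, end_points`
# can be wrapped by SlimNetsLayer. As an untested sketch (not part of the
# original tutorial), vgg_16 could be wrapped the same way after uncommenting
# its import above -- note that VGG expects 224x224 inputs rather than 299x299:
#
#   x_vgg = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
#   net_in_vgg = tl.layers.InputLayer(x_vgg, name='vgg_input_layer')
#   network_vgg = tl.layers.SlimNetsLayer(layer=net_in_vgg, slim_layer=vgg_16,
#                                         slim_args={'num_classes': 1000,
#                                                    'is_training': False},
#                                         name='vgg16')
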
def load_image(path):
    # load the image and rescale pixel values to [0, 1]
    img = skimage.io.imread(path)
    img = img / 255.0
    assert (0 <= img).all() and (img <= 1.0).all()
    # print("Original Image Shape: ", img.shape)
    # crop the image from its center to a square
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
    # resize to 299 x 299, the input size expected by InceptionV3
    resized_img = skimage.transform.resize(crop_img, (299, 299))
    return resized_img


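# A quick sanity check for load_image (hypothetical, using the same image as
# the prediction below): for an RGB input it should return a float image of
# shape (299, 299, 3) with values in [0, 1].
#   img = load_image("data/puzzle.jpeg")
#   assert img.shape == (299, 299, 3)
#   assert 0.0 <= img.min() and img.max() <= 1.0
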
def print_prob(prob):
    synset = class_names
    # print(prob)
    pred = np.argsort(prob)[::-1]
    # get the top-1 label
    top1 = synset[pred[0]]
    print("Top1: ", top1, prob[pred[0]])
    # get the top-5 labels
    top5 = [(synset[pred[i]], prob[pred[i]]) for i in range(5)]
    print("Top5: ", top5)
    return top1


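# print_prob expects a 1-D probability vector aligned with class_names, which
# is assumed here to list the 1000 ImageNet classes. Dummy usage sketch:
#   print_prob(np.ones(1000) / 1000.0)    # uniform input -> top-1 probability 0.001
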
## Alexnet_v2 / all Slim nets can be merged into TensorLayer
# x = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
# net_in = tl.layers.InputLayer(x, name='input_layer')
# network = tl.layers.SlimNetsLayer(layer=net_in, slim_layer=alexnet_v2,
#                                   slim_args={
#                                       'num_classes': 1000,
#                                       'is_training': True,
#                                       'dropout_keep_prob': 0.5,
#                                       'spatial_squeeze': True,
#                                       'scope': 'alexnet_v2',
#                                   })
# sess = tf.InteractiveSession()
# sess.run(tf.initialize_all_variables())
# network.print_params()
# exit()

# InceptionV3
x = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
net_in = tl.layers.InputLayer(x, name='input_layer')
with slim.arg_scope(inception_v3_arg_scope()):
    # logits, end_points = inception_v3(x, num_classes=1001, is_training=False)
    network = tl.layers.SlimNetsLayer(layer=net_in, slim_layer=inception_v3,
                                      slim_args={
                                          'num_classes': 1001,
                                          'is_training': False,
                                          # 'dropout_keep_prob': 0.8,    # for training
                                          # 'min_depth': 16,
                                          # 'depth_multiplier': 1.0,
                                          # 'prediction_fn': slim.softmax,
                                          # 'spatial_squeeze': True,
                                          # 'reuse': None,
                                          # 'scope': 'InceptionV3',
                                      },
                                      name='')
saver = tf.train.Saver()

sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())

# with tf.Session() as sess:
saver.restore(sess, "inception_v3.ckpt")    # download from https://github.com/tensorflow/models/tree/master/slim#Install
print("Model Restored")
network.print_params(False)


y = network.outputs
probs = tf.nn.softmax(y)
img1 = load_image("data/puzzle.jpeg")
img1 = img1.reshape((1, 299, 299, 3))

start_time = time.time()
prob = sess.run(probs, feed_dict={x: img1})
print("End time : %.5fs" % (time.time() - start_time))
print_prob(prob[0][1:])    # Note: the net has 1001 outputs; output 0 is the background class, so we drop it
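
# Optionally, the restored weights could be re-saved in TensorLayer's npz format.
# This is a hedged sketch, assuming tl.files.save_npz / load_npz / assign_params
# exist in your TensorLayer version (they are not used in the original tutorial):
#   tl.files.save_npz(network.all_params, name='inception_v3.npz')
#   params = tl.files.load_npz(name='inception_v3.npz')
#   tl.files.assign_params(sess, params, network)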