Merged

34 commits
d9dc346
TF bug fixing in Tutorials
May 17, 2018
cf2795e
Error fix in #476
May 17, 2018
92d3d05
Issue with Flags in Tutorials Fixed
May 17, 2018
35ef687
Missing import fixed
May 17, 2018
3c48c38
Changelog Update
May 17, 2018
a29d3ca
VGG19 import error fix
May 17, 2018
f90dec5
Error fixing in VGG tutorials
May 17, 2018
acaab7c
TFRecord Shape Error Fix
May 17, 2018
a872010
Sess Initialization Error Fix
May 17, 2018
934ad54
Squeezenet model loading from "models" dir
May 17, 2018
c5fdf08
PTB tutorials import issue fixed
May 17, 2018
f4db275
mobilenet load from dir "models"
May 17, 2018
a5a24e9
YAPF error fix
May 17, 2018
78ac5cb
Missing Import fixed
May 17, 2018
20e1c48
Various Fixes on Tutorials
May 17, 2018
48459b3
YAPF error correct
May 17, 2018
ed58509
Update CHANGELOG.md
DEKHTIARJonathan May 17, 2018
e43792e
Merge branch 'master' into tutorial_fix
DEKHTIARJonathan May 18, 2018
273f7eb
Merge branch 'master' into tutorial_fix
DEKHTIARJonathan May 18, 2018
51cff0a
update VGG16 tutorial, auto download model
zsdonghao May 18, 2018
6a9279f
Merge branch 'master' into tutorial_fix
DEKHTIARJonathan May 18, 2018
6f9d0e7
Merge branch 'master' into tutorial_fix
DEKHTIARJonathan May 19, 2018
5da4ca2
Merge branch 'master' into tutorial_fix
DEKHTIARJonathan May 22, 2018
a2e8d9a
Merge branch 'master' into tutorial_fix
DEKHTIARJonathan May 22, 2018
0086e79
Merge branch 'master' into tutorial_fix
DEKHTIARJonathan May 23, 2018
b2de06b
Merge branch 'master' into tutorial_fix
May 27, 2018
523b615
Merge branch 'master' into tutorial_fix
May 28, 2018
b350907
Python 3 Unicode Encoding Error
May 29, 2018
b940da9
Merge branch 'master' into tutorial_fix
May 29, 2018
757b884
Merge branch 'master' into tutorial_fix
May 30, 2018
bb4be34
Merge branch 'master' into tutorial_fix
May 30, 2018
0e40149
Merge branch 'master' into tutorial_fix
May 30, 2018
ac7bebd
Deprecation Warning Fix
May 30, 2018
cf461e3
Merge branch 'master' into tutorial_fix
Jun 1, 2018
5 changes: 4 additions & 1 deletion CHANGELOG.md
@@ -125,7 +125,10 @@ To release a new version, please update the changelog as followed:
- Error in `tl.layers.TernaryConv2d` fixed - self.inputs not defined (by @DEKHTIARJonathan in #658)
- Deprecation warning fixed in `tl.layers.binary._compute_threshold()` (by @DEKHTIARJonathan in #658)
- All references to `tf.logging` replaced by `tl.logging` (by @DEKHTIARJonathan in #661)

- Tutorial:
- `tutorial_word2vec_basic.py` saving issue #476 fixed (by @DEKHTIARJonathan in #635)
- All tutorials tested and errors have been fixed (by @DEKHTIARJonathan in #635)

### Security

### Dependencies Update
2 changes: 1 addition & 1 deletion example/tutorial_cifar10_tfrecord.py
@@ -150,7 +150,7 @@ def read_and_decode(filename, is_train=None):
# print("img_batch : %s" % img_batch._shape)
# print("label_batch : %s" % label_batch._shape)
#
# init = tf.initialize_all_variables()
# init = tf.global_variables_initializer()
# with tf.Session() as sess:
# sess.run(init)
# coord = tf.train.Coordinator()
3 changes: 3 additions & 0 deletions example/tutorial_frozenlake_dqn.py
@@ -26,7 +26,10 @@

"""

import time
import gym
import numpy as np

import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
6 changes: 2 additions & 4 deletions example/tutorial_generate_text.py
@@ -120,7 +120,7 @@ def customized_clean_str(string):


def customized_read_words(input_fpath): #, dictionary):
with open(input_fpath, "r") as f:
with open(input_fpath, "r", encoding="utf8") as f:
words = f.read()
# Clean the data
words = customized_clean_str(words)
@@ -155,7 +155,7 @@ def main_restore_embedding_layer():

emb_net = tl.layers.EmbeddingInputlayer(x, vocabulary_size, embedding_size, name='emb')

# sess.run(tf.initialize_all_variables())
# sess.run(tf.global_variables_initializer())
tl.layers.initialize_global_variables(sess)

tl.files.assign_params(sess, [load_params[0]], emb_net)
@@ -369,5 +369,3 @@ def loss_fn(outputs, targets, batch_size, sequence_length):

# How to generate text from a given context
main_lstm_generate_text()

#
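(Side note, not from the PR: the encoding fix above matters because Python 3's open() defaults to the platform locale encoding, which can raise UnicodeDecodeError on UTF-8 corpora under Windows. A minimal sketch, with a hypothetical file path:)

    def read_words(input_fpath):
        # Pass the encoding explicitly instead of relying on the locale default.
        with open(input_fpath, "r", encoding="utf8") as f:
            return f.read().replace("\n", " ").split()

    # words = read_words("data/corpus.txt")  # hypothetical path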
14 changes: 8 additions & 6 deletions example/tutorial_inceptionV3_tfslim.py
@@ -38,12 +38,14 @@

slim = tf.contrib.slim
try:
from data.imagenet_classes import *
from tensorlayer.models.imagenet_classes import *
except Exception as e:
raise Exception(
"{} / download the file from: https://github.com/zsdonghao/tensorlayer/tree/master/example/data".format(e)
)

MODEL_PATH = os.path.join("models", 'inception_v3.ckpt')


def load_image(path):
# load image
@@ -58,7 +60,7 @@ def load_image(path):
xx = int((img.shape[1] - short_edge) / 2)
crop_img = img[yy:yy + short_edge, xx:xx + short_edge]
# resize to 299, 299
resized_img = skimage.transform.resize(crop_img, (299, 299))
resized_img = skimage.transform.resize(crop_img, (299, 299), anti_aliasing=False)
return resized_img


@@ -89,7 +91,7 @@ def print_prob(prob):
# name='alexnet_v2' # <-- the name should be the same with the ckpt model
# )
# sess = tf.InteractiveSession()
# # sess.run(tf.initialize_all_variables())
# # sess.run(tf.global_variables_initializer())
# tl.layers.initialize_global_variables(sess)
# network.print_params()

@@ -122,15 +124,15 @@ def print_prob(prob):
network.print_params(False)

saver = tf.train.Saver()
if not os.path.isfile("inception_v3.ckpt"):
if not os.path.isfile(MODEL_PATH):
raise Exception(
"Please download inception_v3 ckpt from https://github.com/tensorflow/models/tree/master/research/slim"
)

try: # TF12+
saver.restore(sess, "./inception_v3.ckpt")
saver.restore(sess, MODEL_PATH)
except Exception: # TF11
saver.restore(sess, "inception_v3.ckpt")
saver.restore(sess, MODEL_PATH)
print("Model Restored")

y = network.outputs
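(Illustrative sketch, not part of the diff: the load_image() change earlier in this file passes anti_aliasing explicitly so the preprocessing keeps the same output, and stays warning-free, on newer scikit-image releases. A self-contained version of the centre-crop-and-resize step:)

    import skimage.io
    import skimage.transform

    def load_image(path, size=299):
        img = skimage.io.imread(path) / 255.0
        # centre crop to a square before resizing
        short_edge = min(img.shape[:2])
        yy = int((img.shape[0] - short_edge) / 2)
        xx = int((img.shape[1] - short_edge) / 2)
        crop = img[yy:yy + short_edge, xx:xx + short_edge]
        return skimage.transform.resize(crop, (size, size), anti_aliasing=False)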
8 changes: 5 additions & 3 deletions example/tutorial_mobilenet.py
@@ -17,6 +17,8 @@
BatchNormLayer, Conv2d, DepthwiseConv2d, FlattenLayer, GlobalMeanPool2d, InputLayer, ReshapeLayer
)

MODEL_PATH = os.path.join("models", "mobilenet.npz")


def conv_block(n, n_filter, filter_size=(3, 3), strides=(1, 1), is_train=False, name='conv_block'):
# ref: https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet.py
@@ -101,10 +103,10 @@ def mobilenet(x, is_train=True, reuse=False):
sess = tf.InteractiveSession()
# tl.layers.initialize_global_variables(sess)

if not os.path.isfile("mobilenet.npz"):
if not os.path.isfile(MODEL_PATH):
raise Exception("Please download mobilenet.npz from : https://github.com/tensorlayer/pretrained-models")

tl.files.load_and_assign_npz(sess=sess, name='mobilenet.npz', network=n)
tl.files.load_and_assign_npz(sess=sess, name=MODEL_PATH, network=n)

img = tl.vis.read_image('data/tiger.jpeg')
img = tl.prepro.imresize(img, (224, 224)) / 255
@@ -114,4 +116,4 @@ def mobilenet(x, is_train=True, reuse=False):

print(" End time : %.5ss" % (time.time() - start_time))
print('Predicted :', decode_predictions([prob], top=3)[0])
# tl.files.save_npz(n.all_params, name='mobilenet.npz', sess=sess)
# tl.files.save_npz(n.all_params, name=MODEL_PATH, sess=sess)
16 changes: 13 additions & 3 deletions example/tutorial_ptb_lstm.py
@@ -100,17 +100,27 @@

"""

import sys
import time

import numpy as np
import tensorflow as tf

import tensorlayer as tl

flags = tf.flags
flags = tf.app.flags

flags.DEFINE_string("model", "small", "A type of model. Possible options are: small, medium, large.")

if (tf.VERSION >= '1.5'):
# parse flags
flags.FLAGS(sys.argv, known_only=True)
flags.ArgumentParser()

FLAGS = flags.FLAGS

tf.logging.set_verbosity(tf.logging.DEBUG)


def main(_):
"""
@@ -235,7 +245,7 @@ def inference(x, is_training, num_steps, reuse=None):
# Inference for Testing (Evaluation)
net_test, lstm1_test, lstm2_test = inference(input_data_test, is_training=False, num_steps=1, reuse=True)

# sess.run(tf.initialize_all_variables())
# sess.run(tf.global_variables_initializer())
tl.layers.initialize_global_variables(sess)

def loss_fn(outputs, targets): #, batch_size, num_steps):
@@ -269,7 +279,7 @@ def loss_fn(outputs, targets): #, batch_size, num_steps):
optimizer = tf.train.GradientDescentOptimizer(lr)
train_op = optimizer.apply_gradients(zip(grads, tvars))

# sess.run(tf.initialize_all_variables())
# sess.run(tf.global_variables_initializer())
tl.layers.initialize_global_variables(sess)

net.print_params()
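(Illustrative sketch, not part of the diff: from TF 1.5 the tf.app.flags module is backed by absl.flags, so flags are parsed lazily and scripts that read them outside tf.app.run() need to trigger parsing themselves, which is what the hunk near the top of this file does. Roughly:)

    import sys
    import tensorflow as tf

    flags = tf.app.flags
    flags.DEFINE_string("model", "small", "A type of model: small, medium or large.")

    if tf.VERSION >= '1.5':
        # absl-backed FLAGS: parse argv explicitly, ignoring unknown arguments.
        flags.FLAGS(sys.argv, known_only=True)

    FLAGS = flags.FLAGS
    print(FLAGS.model)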
16 changes: 13 additions & 3 deletions example/tutorial_ptb_lstm_state_is_tuple.py
@@ -101,17 +101,27 @@

"""

import sys
import time

import numpy as np
import tensorflow as tf

import tensorlayer as tl

flags = tf.flags
flags = tf.app.flags

flags.DEFINE_string("model", "small", "A type of model. Possible options are: small, medium, large.")

if (tf.VERSION >= '1.5'):
# parse flags
flags.FLAGS(sys.argv, known_only=True)
flags.ArgumentParser()

FLAGS = flags.FLAGS

tf.logging.set_verbosity(tf.logging.DEBUG)


def main(_):
"""
@@ -241,7 +251,7 @@ def inference(x, is_training, num_steps, reuse=None):
# Inference for Testing (Evaluation)
net_test, lstm1_test, lstm2_test = inference(input_data_test, is_training=False, num_steps=1, reuse=True)

# sess.run(tf.initialize_all_variables())
# sess.run(tf.global_variables_initializer())
tl.layers.initialize_global_variables(sess)

def loss_fn(outputs, targets, batch_size):
@@ -275,7 +285,7 @@ def loss_fn(outputs, targets, batch_size):
optimizer = tf.train.GradientDescentOptimizer(lr)
train_op = optimizer.apply_gradients(zip(grads, tvars))

# sess.run(tf.initialize_all_variables())
# sess.run(tf.global_variables_initializer())
tl.layers.initialize_global_variables(sess)

net.print_params()
8 changes: 5 additions & 3 deletions example/tutorial_squeezenet.py
@@ -11,6 +11,8 @@
import tensorlayer as tl
from tensorlayer.layers import (ConcatLayer, Conv2d, DropoutLayer, GlobalMeanPool2d, InputLayer, MaxPool2d)

MODEL_PATH = os.path.join("models", "squeezenet.npz")


def decode_predictions(preds, top=5): # keras.applications.resnet50
fpath = os.path.join("data", "imagenet_class_index.json")
@@ -114,8 +116,8 @@ def squeezenet(x, is_train=True, reuse=False):
sess = tf.InteractiveSession()
tl.layers.initialize_global_variables(sess)

if tl.files.file_exists('squeezenet.npz'):
tl.files.load_and_assign_npz(sess=sess, name='squeezenet.npz', network=n)
if tl.files.file_exists(MODEL_PATH):
tl.files.load_and_assign_npz(sess=sess, name=MODEL_PATH, network=n)
else:
raise Exception(
"please download the pre-trained squeezenet.npz from https://github.com/tensorlayer/pretrained-models"
@@ -129,4 +131,4 @@ def squeezenet(x, is_train=True, reuse=False):
print(" End time : %.5ss" % (time.time() - start_time))

print('Predicted:', decode_predictions([prob], top=3)[0])
tl.files.save_npz(n.all_params, name='squeezenet.npz', sess=sess)
tl.files.save_npz(n.all_params, name=MODEL_PATH, sess=sess)
8 changes: 3 additions & 5 deletions example/tutorial_tfrecord.py
@@ -98,9 +98,9 @@ def read_and_decode(filename):
img_batch, label_batch = tf.train.shuffle_batch(
[img, label], batch_size=4, capacity=2000, min_after_dequeue=1000, num_threads=16
)
print("img_batch : %s" % img_batch._shape)
print("label_batch : %s" % label_batch._shape)
# init = tf.initialize_all_variables()
print("img_batch : %s" % img_batch.shape)
print("label_batch : %s" % label_batch.shape)
# init = tf.global_variables_initializer()
with tf.Session() as sess:
# sess.run(init)
tl.layers.initialize_global_variables(sess)
@@ -116,5 +116,3 @@ def read_and_decode(filename):
coord.request_stop()
coord.join(threads)
sess.close()

#
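(Side note, not from the PR: _shape is a private attribute of tf.Tensor; the public equivalents are the shape property and get_shape(), e.g.:)

    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 32, 32, 3])
    print(x.shape)        # (?, 32, 32, 3) -- static TensorShape, public API
    print(x.get_shape())  # equivalent accessor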
6 changes: 3 additions & 3 deletions example/tutorial_tfrecord2.py
@@ -83,9 +83,9 @@ def read_and_decode(filename):
img_batch, label_batch = tf.train.shuffle_batch([img, label], batch_size=4, capacity=50000, \
min_after_dequeue=10000, num_threads=1)

print("img_batch : %s" % img_batch._shape)
print("label_batch : %s" % label_batch._shape)
# init = tf.initialize_all_variables()
print("img_batch : %s" % img_batch.shape)
print("label_batch : %s" % label_batch.shape)
# init = tf.global_variables_initializer()
with tf.Session() as sess:
# sess.run(init)
tl.layers.initialize_global_variables(sess)
6 changes: 2 additions & 4 deletions example/tutorial_tfrecord3.py
@@ -351,7 +351,7 @@ def prefetch_input_data(
num_threads=4
)
sess = tf.Session()
# sess.run(tf.initialize_all_variables())
# sess.run(tf.global_variables_initializer())
tl.layers.initialize_global_variables(sess)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
@@ -447,7 +447,7 @@ def batch_with_dynamic_pad(images_and_captions, batch_size, queue_capacity, add_
batch_with_dynamic_pad(images_and_captions=[[img, img_cap]], batch_size=4, queue_capacity=50000)
)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for _ in range(3):
@@ -460,5 +460,3 @@ def batch_with_dynamic_pad(images_and_captions, batch_size, queue_capacity, add_
coord.request_stop()
coord.join(threads)
sess.close()

#
16 changes: 10 additions & 6 deletions example/tutorial_vgg16.py
@@ -37,13 +37,17 @@
"""

import os
import time

import numpy as np
from scipy.misc import imread, imresize

import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *

try:
from data.imagenet_classes import *
from tensorlayer.models.imagenet_classes import *
except Exception as e:
raise Exception(
"{} / download the file from: https://github.com/zsdonghao/tensorlayer/tree/master/example/data".format(e)
@@ -155,14 +159,14 @@ def fc_layers(net):
net.print_params()
net.print_layers()

if not os.path.isfile("vgg16_weights.npz"):
print("Please download vgg16_weights.npz from : http://www.cs.toronto.edu/~frossard/post/vgg16/")
exit()
npz = np.load('vgg16_weights.npz')
tl.files.maybe_download_and_extract(
'vgg16_weights.npz', 'models', 'http://www.cs.toronto.edu/~frossard/vgg16/', expected_bytes=553436134
)
npz = np.load(os.path.join('models', 'vgg16_weights.npz'))

params = []
for val in sorted(npz.items()):
print(" Loading %s" % str(val[1].shape))
print(" Loading params %s" % str(val[1].shape))
params.append(val[1])

tl.files.assign_params(sess, params, net)
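(Illustrative sketch mirroring the change above, not an addition to the PR: tl.files.maybe_download_and_extract fetches the file into the target directory only when it is missing, so the tutorial no longer aborts with a manual-download message:)

    import os
    import numpy as np
    import tensorlayer as tl

    # Downloads models/vgg16_weights.npz on the first run, then reuses the cached copy.
    tl.files.maybe_download_and_extract(
        'vgg16_weights.npz', 'models', 'http://www.cs.toronto.edu/~frossard/vgg16/',
        expected_bytes=553436134
    )
    npz = np.load(os.path.join('models', 'vgg16_weights.npz'))
    print(sorted(npz.keys())[:5])   # inspect the parameter names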