From 4ae6b14b721c35617a9bc2b3194d38f13ac31a82 Mon Sep 17 00:00:00 2001
From: dinesh-packt
Date: Tue, 25 Apr 2017 12:52:33 +0530
Subject: [PATCH] Updated Chapter01

---
 Chapter01/keras_MINST_V1.py | 61 +++++++++++++++++++++++++
 Chapter01/keras_MINST_V2.py | 64 +++++++++++++++++++++++++++
 Chapter01/keras_MINST_V3.py | 88 +++++++++++++++++++++++++++++++++++++
 Chapter01/keras_MINST_V4.py | 88 +++++++++++++++++++++++++++++++++++++
 4 files changed, 301 insertions(+)
 create mode 100644 Chapter01/keras_MINST_V1.py
 create mode 100644 Chapter01/keras_MINST_V2.py
 create mode 100644 Chapter01/keras_MINST_V3.py
 create mode 100644 Chapter01/keras_MINST_V4.py

diff --git a/Chapter01/keras_MINST_V1.py b/Chapter01/keras_MINST_V1.py
new file mode 100644
index 0000000..09f852e
--- /dev/null
+++ b/Chapter01/keras_MINST_V1.py
@@ -0,0 +1,61 @@
+from __future__ import print_function
+import numpy as np
+from keras.datasets import mnist
+from keras.models import Sequential
+from keras.layers.core import Dense, Activation
+from keras.optimizers import SGD
+from keras.utils import np_utils
+
+np.random.seed(1671) # for reproducibility
+
+# network and training
+NB_EPOCH = 200
+BATCH_SIZE = 128
+VERBOSE = 1
+NB_CLASSES = 10 # number of outputs = number of digits
+OPTIMIZER = SGD() # SGD optimizer, explained later in this chapter
+N_HIDDEN = 128
+VALIDATION_SPLIT=0.2 # how much TRAIN is reserved for VALIDATION
+
+# data: shuffled and split between train and test sets
+#
+(X_train, y_train), (X_test, y_test) = mnist.load_data()
+
+#X_train is 60000 rows of 28x28 values --> reshaped in 60000 x 784
+RESHAPED = 784
+#
+X_train = X_train.reshape(60000, RESHAPED)
+X_test = X_test.reshape(10000, RESHAPED)
+X_train = X_train.astype('float32')
+X_test = X_test.astype('float32')
+
+# normalize
+#
+X_train /= 255
+X_test /= 255
+print(X_train.shape[0], 'train samples')
+print(X_test.shape[0], 'test samples')
+
+# convert class vectors to binary class matrices
+Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
+Y_test = np_utils.to_categorical(y_test, NB_CLASSES)
+
+# 10 outputs
+# final stage is softmax
+
+model = Sequential()
+model.add(Dense(NB_CLASSES, input_shape=(RESHAPED,)))
+model.add(Activation('softmax'))
+
+model.summary()
+
+model.compile(loss='categorical_crossentropy',
+              optimizer=OPTIMIZER,
+              metrics=['accuracy'])
+
+history = model.fit(X_train, Y_train,
+                    batch_size=BATCH_SIZE, epochs=NB_EPOCH,
+                    verbose=VERBOSE, validation_split=VALIDATION_SPLIT)
+score = model.evaluate(X_test, Y_test, verbose=VERBOSE)
+print("\nTest score:", score[0])
+print('Test accuracy:', score[1])
diff --git a/Chapter01/keras_MINST_V2.py b/Chapter01/keras_MINST_V2.py
new file mode 100644
index 0000000..b3c0016
--- /dev/null
+++ b/Chapter01/keras_MINST_V2.py
@@ -0,0 +1,64 @@
+from __future__ import print_function
+import numpy as np
+from keras.datasets import mnist
+from keras.models import Sequential
+from keras.layers.core import Dense, Activation
+from keras.optimizers import SGD
+from keras.utils import np_utils
+
+np.random.seed(1671) # for reproducibility
+
+# network and training
+NB_EPOCH = 20
+BATCH_SIZE = 128
+VERBOSE = 1
+NB_CLASSES = 10 # number of outputs = number of digits
+OPTIMIZER = SGD() # optimizer, explained later in this chapter
+N_HIDDEN = 128
+VALIDATION_SPLIT=0.2 # how much TRAIN is reserved for VALIDATION
+
+# data: shuffled and split between train and test sets
+(X_train, y_train), (X_test, y_test) = mnist.load_data()
+
+#X_train is 60000 rows of 28x28 values --> reshaped in 60000 x 784
+RESHAPED = 784
+#
+X_train = X_train.reshape(60000, RESHAPED)
+X_test = X_test.reshape(10000, RESHAPED)
+X_train = X_train.astype('float32')
+X_test = X_test.astype('float32')
+
+# normalize
+X_train /= 255
+X_test /= 255
+print(X_train.shape[0], 'train samples')
+print(X_test.shape[0], 'test samples')
+
+# convert class vectors to binary class matrices
+Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
+Y_test = np_utils.to_categorical(y_test, NB_CLASSES)
+
+# N_HIDDEN hidden layers
+# 10 outputs
+# final stage is softmax
+
+model = Sequential()
+model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,)))
+model.add(Activation('relu'))
+model.add(Dense(N_HIDDEN))
+model.add(Activation('relu'))
+model.add(Dense(NB_CLASSES))
+model.add(Activation('softmax'))
+model.summary()
+
+model.compile(loss='categorical_crossentropy',
+              optimizer=OPTIMIZER,
+              metrics=['accuracy'])
+
+history = model.fit(X_train, Y_train,
+                    batch_size=BATCH_SIZE, epochs=NB_EPOCH,
+                    verbose=VERBOSE, validation_split=VALIDATION_SPLIT)
+
+score = model.evaluate(X_test, Y_test, verbose=VERBOSE)
+print("\nTest score:", score[0])
+print('Test accuracy:', score[1])
\ No newline at end of file
diff --git a/Chapter01/keras_MINST_V3.py b/Chapter01/keras_MINST_V3.py
new file mode 100644
index 0000000..25d3357
--- /dev/null
+++ b/Chapter01/keras_MINST_V3.py
@@ -0,0 +1,88 @@
+from __future__ import print_function
+import numpy as np
+from keras.datasets import mnist
+from keras.models import Sequential
+from keras.layers.core import Dense, Dropout, Activation
+from keras.optimizers import SGD
+from keras.utils import np_utils
+
+import matplotlib.pyplot as plt
+
+np.random.seed(1671) # for reproducibility
+
+# network and training
+NB_EPOCH = 250
+BATCH_SIZE = 128
+VERBOSE = 1
+NB_CLASSES = 10 # number of outputs = number of digits
+OPTIMIZER = SGD() # optimizer, explained later in this chapter
+N_HIDDEN = 128
+VALIDATION_SPLIT=0.2 # how much TRAIN is reserved for VALIDATION
+DROPOUT = 0.3
+
+# data: shuffled and split between train and test sets
+(X_train, y_train), (X_test, y_test) = mnist.load_data()
+
+#X_train is 60000 rows of 28x28 values --> reshaped in 60000 x 784
+RESHAPED = 784
+#
+X_train = X_train.reshape(60000, RESHAPED)
+X_test = X_test.reshape(10000, RESHAPED)
+X_train = X_train.astype('float32')
+X_test = X_test.astype('float32')
+
+# normalize
+X_train /= 255
+X_test /= 255
+print(X_train.shape[0], 'train samples')
+print(X_test.shape[0], 'test samples')
+
+# convert class vectors to binary class matrices
+Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
+Y_test = np_utils.to_categorical(y_test, NB_CLASSES)
+
+# N_HIDDEN hidden layers
+# 10 outputs
+# final stage is softmax
+
+model = Sequential()
+model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,)))
+model.add(Activation('relu'))
+model.add(Dropout(DROPOUT))
+model.add(Dense(N_HIDDEN))
+model.add(Activation('relu'))
+model.add(Dropout(DROPOUT))
+model.add(Dense(NB_CLASSES))
+model.add(Activation('softmax'))
+model.summary()
+
+model.compile(loss='categorical_crossentropy',
+              optimizer=OPTIMIZER,
+              metrics=['accuracy'])
+
+history = model.fit(X_train, Y_train,
+                    batch_size=BATCH_SIZE, epochs=NB_EPOCH,
+                    verbose=VERBOSE, validation_split=VALIDATION_SPLIT)
+
+score = model.evaluate(X_test, Y_test, verbose=VERBOSE)
+print("\nTest score:", score[0])
+print('Test accuracy:', score[1])
+
+# list all data in history
+print(history.history.keys())
+# summarize history for accuracy
+plt.plot(history.history['acc'])
+plt.plot(history.history['val_acc'])
+plt.title('model accuracy')
+plt.ylabel('accuracy')
+plt.xlabel('epoch')
+plt.legend(['train', 'test'], loc='upper left')
+plt.show()
+# summarize history for loss
+plt.plot(history.history['loss'])
+plt.plot(history.history['val_loss'])
+plt.title('model loss')
+plt.ylabel('loss')
+plt.xlabel('epoch')
+plt.legend(['train', 'test'], loc='upper left')
+plt.show()
\ No newline at end of file
diff --git a/Chapter01/keras_MINST_V4.py b/Chapter01/keras_MINST_V4.py
new file mode 100644
index 0000000..871e1cb
--- /dev/null
+++ b/Chapter01/keras_MINST_V4.py
@@ -0,0 +1,88 @@
+from __future__ import print_function
+import numpy as np
+from keras.datasets import mnist
+from keras.models import Sequential
+from keras.layers.core import Dense, Dropout, Activation
+from keras.optimizers import RMSprop
+from keras.utils import np_utils
+
+import matplotlib.pyplot as plt
+
+np.random.seed(1671) # for reproducibility
+
+# network and training
+NB_EPOCH = 20
+BATCH_SIZE = 128
+VERBOSE = 1
+NB_CLASSES = 10 # number of outputs = number of digits
+OPTIMIZER = RMSprop() # optimizer, explained in this chapter
+N_HIDDEN = 128
+VALIDATION_SPLIT=0.2 # how much TRAIN is reserved for VALIDATION
+DROPOUT = 0.3
+
+# data: shuffled and split between train and test sets
+(X_train, y_train), (X_test, y_test) = mnist.load_data()
+
+#X_train is 60000 rows of 28x28 values --> reshaped in 60000 x 784
+RESHAPED = 784
+#
+X_train = X_train.reshape(60000, RESHAPED)
+X_test = X_test.reshape(10000, RESHAPED)
+X_train = X_train.astype('float32')
+X_test = X_test.astype('float32')
+
+# normalize
+X_train /= 255
+X_test /= 255
+print(X_train.shape[0], 'train samples')
+print(X_test.shape[0], 'test samples')
+
+# convert class vectors to binary class matrices
+Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
+Y_test = np_utils.to_categorical(y_test, NB_CLASSES)
+
+# N_HIDDEN hidden layers
+# 10 outputs
+# final stage is softmax
+
+model = Sequential()
+model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,)))
+model.add(Activation('relu'))
+model.add(Dropout(DROPOUT))
+model.add(Dense(N_HIDDEN))
+model.add(Activation('relu'))
+model.add(Dropout(DROPOUT))
+model.add(Dense(NB_CLASSES))
+model.add(Activation('softmax'))
+model.summary()
+
+model.compile(loss='categorical_crossentropy',
+              optimizer=OPTIMIZER,
+              metrics=['accuracy'])
+
+history = model.fit(X_train, Y_train,
+                    batch_size=BATCH_SIZE, epochs=NB_EPOCH,
+                    verbose=VERBOSE, validation_split=VALIDATION_SPLIT)
+
+score = model.evaluate(X_test, Y_test, verbose=VERBOSE)
+print("\nTest score:", score[0])
+print('Test accuracy:', score[1])
+
+# list all data in history
+print(history.history.keys())
+# summarize history for accuracy
+plt.plot(history.history['acc'])
+plt.plot(history.history['val_acc'])
+plt.title('model accuracy')
+plt.ylabel('accuracy')
+plt.xlabel('epoch')
+plt.legend(['train', 'test'], loc='upper left')
+plt.show()
+# summarize history for loss
+plt.plot(history.history['loss'])
+plt.plot(history.history['val_loss'])
+plt.title('model loss')
+plt.ylabel('loss')
+plt.xlabel('epoch')
+plt.legend(['train', 'test'], loc='upper left')
+plt.show()
\ No newline at end of file