From d7014a4ab9293d353a7fed4969a37eb0829c4f60 Mon Sep 17 00:00:00 2001
From: dinesh-packt
Date: Tue, 25 Apr 2017 12:53:18 +0530
Subject: [PATCH] Updated Chapter02

---
 Chapter02/cifar10_architecture.json |  1 +
 Chapter02/keras_Azure.py            | 33 +++++++++++++++++++++++++++++
 Chapter02/keras_VGG16_prebuilt.py   | 25 ++++++++++++++++++++++
 3 files changed, 59 insertions(+)
 create mode 100644 Chapter02/cifar10_architecture.json
 create mode 100644 Chapter02/keras_Azure.py
 create mode 100644 Chapter02/keras_VGG16_prebuilt.py

diff --git a/Chapter02/cifar10_architecture.json b/Chapter02/cifar10_architecture.json
new file mode 100644
index 0000000..3132fc2
--- /dev/null
+++ b/Chapter02/cifar10_architecture.json
@@ -0,0 +1 @@
+{"class_name": "Sequential", "keras_version": "1.1.1", "config": [{"class_name": "Convolution2D", "config": {"b_regularizer": null, "W_constraint": null, "b_constraint": null, "name": "convolution2d_1", "activity_regularizer": null, "trainable": true, "dim_ordering": "th", "nb_col": 3, "subsample": [1, 1], "init": "glorot_uniform", "bias": true, "nb_filter": 32, "input_dtype": "float32", "border_mode": "same", "batch_input_shape": [null, 3, 32, 32], "W_regularizer": null, "activation": "linear", "nb_row": 3}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_1"}}, {"class_name": "Convolution2D", "config": {"W_constraint": null, "b_constraint": null, "name": "convolution2d_2", "activity_regularizer": null, "trainable": true, "dim_ordering": "th", "nb_col": 3, "subsample": [1, 1], "init": "glorot_uniform", "bias": true, "nb_filter": 32, "border_mode": "same", "b_regularizer": null, "W_regularizer": null, "activation": "linear", "nb_row": 3}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_2"}}, {"class_name": "MaxPooling2D", "config": {"name": "maxpooling2d_1", "trainable": true, "dim_ordering": "th", "pool_size": [2, 2], "strides": [2, 2], "border_mode": "valid"}}, {"class_name": "Dropout", "config": {"p": 0.25, "trainable": true, "name": "dropout_1"}}, {"class_name": "Convolution2D", "config": {"W_constraint": null, "b_constraint": null, "name": "convolution2d_3", "activity_regularizer": null, "trainable": true, "dim_ordering": "th", "nb_col": 3, "subsample": [1, 1], "init": "glorot_uniform", "bias": true, "nb_filter": 64, "border_mode": "same", "b_regularizer": null, "W_regularizer": null, "activation": "linear", "nb_row": 3}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_3"}}, {"class_name": "Convolution2D", "config": {"W_constraint": null, "b_constraint": null, "name": "convolution2d_4", "activity_regularizer": null, "trainable": true, "dim_ordering": "th", "nb_col": 3, "subsample": [1, 1], "init": "glorot_uniform", "bias": true, "nb_filter": 64, "border_mode": "valid", "b_regularizer": null, "W_regularizer": null, "activation": "linear", "nb_row": 3}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_4"}}, {"class_name": "MaxPooling2D", "config": {"name": "maxpooling2d_2", "trainable": true, "dim_ordering": "th", "pool_size": [2, 2], "strides": [2, 2], "border_mode": "valid"}}, {"class_name": "Dropout", "config": {"p": 0.25, "trainable": true, "name": "dropout_2"}}, {"class_name": "Flatten", "config": {"trainable": true, "name": "flatten_1"}}, {"class_name": "Dense", "config": {"W_constraint": null, "b_constraint": null, "name": "dense_1", "activity_regularizer": null, "trainable": true, "init": "glorot_uniform", "bias": true, "input_dim": null, "b_regularizer": null, "W_regularizer": null, "activation": "linear", "output_dim": 512}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_5"}}, {"class_name": "Dropout", "config": {"p": 0.5, "trainable": true, "name": "dropout_3"}}, {"class_name": "Dense", "config": {"W_constraint": null, "b_constraint": null, "name": "dense_2", "activity_regularizer": null, "trainable": true, "init": "glorot_uniform", "bias": true, "input_dim": null, "b_regularizer": null, "W_regularizer": null, "activation": "linear", "output_dim": 10}}, {"class_name": "Activation", "config": {"activation": "softmax", "trainable": true, "name": "activation_6"}}]}
\ No newline at end of file
diff --git a/Chapter02/keras_Azure.py b/Chapter02/keras_Azure.py
new file mode 100644
index 0000000..8bc20a9
--- /dev/null
+++ b/Chapter02/keras_Azure.py
@@ -0,0 +1,33 @@
+# The script MUST contain a function named azureml_main
+# which is the entry point for this module.
+
+# imports up here can be used throughout the script
+import pandas as pd
+import theano
+import theano.tensor as T
+from theano import function
+
+from keras.models import Sequential
+from keras.layers import Dense, Activation
+import numpy as np
+# The entry point function can contain up to two input arguments:
+#   Param dataframe1: a pandas.DataFrame
+#   Param dataframe2: a pandas.DataFrame
+def azureml_main(dataframe1 = None, dataframe2 = None):
+    # Execution logic goes here
+    # print('Input pandas.DataFrame #1:\r\n\r\n{0}'.format(dataframe1))
+
+    # If a zip file is connected to the third input port,
+    # it is unzipped under ".\Script Bundle". This directory is added
+    # to sys.path. Therefore, if your zip file contains a Python file
+    # mymodule.py you can import it using:
+    # import mymodule
+    model = Sequential()
+    model.add(Dense(1, input_dim=784, activation="sigmoid"))  # sigmoid output for binary_crossentropy
+    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
+    data = np.random.random((1000, 784))
+    labels = np.random.randint(2, size=(1000, 1))
+    model.fit(data, labels, nb_epoch=10, batch_size=32)
+    model.evaluate(data, labels)
+
+    return dataframe1,
diff --git a/Chapter02/keras_VGG16_prebuilt.py b/Chapter02/keras_VGG16_prebuilt.py
new file mode 100644
index 0000000..4d426ee
--- /dev/null
+++ b/Chapter02/keras_VGG16_prebuilt.py
@@ -0,0 +1,25 @@
+from keras.applications.vgg16 import VGG16
+from keras.models import Model
+from keras.preprocessing import image
+from keras.applications.vgg16 import preprocess_input
+import numpy as np
+
+
+# pre-built and pre-trained deep learning VGG16 model
+base_model = VGG16(weights='imagenet', include_top=True)
+for i, layer in enumerate(base_model.layers):
+    print(i, layer.name, layer.output_shape)
+
+# extract features from the block4_pool block
+model = Model(input=base_model.input, output=base_model.get_layer('block4_pool').output)
+
+img_path = 'cat.jpg'
+img = image.load_img(img_path, target_size=(224, 224))
+x = image.img_to_array(img)
+x = np.expand_dims(x, axis=0)
+x = preprocess_input(x)
+
+# get the features from this block
+features = model.predict(x)
+
+print(features)