
Refactor TensorFlow 2 code to hybrid functions #418


Open · wants to merge 3 commits into base: master
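For context, and not part of the diff itself: `@tf.function` traces the decorated Python function into a TensorFlow graph on its first call and reuses that graph on later calls, which is what turns these eager-mode helpers into "hybrid" functions. Below is a minimal before/after sketch of the pattern this PR applies throughout the notebooks; the variable names and shapes are illustrative, not taken from the diff.

```python
import tensorflow as tf

# Illustrative shapes (e.g. flattened 28x28 images, 10 classes).
W = tf.Variable(tf.ones([784, 10]), name="weight")
b = tf.Variable(tf.zeros([10]), name="bias")

# Eager version: Python re-executes every op on each call.
def logistic_regression_eager(x):
    return tf.nn.softmax(tf.matmul(x, W) + b)

# Hybrid version: traced once per input signature, then run as a graph.
@tf.function
def logistic_regression(x):
    return tf.nn.softmax(tf.matmul(x, W) + b)

x = tf.random.normal([32, 784])
print(logistic_regression(x).shape)  # (32, 10)
```

Both versions return the same values; the decorated one simply runs as a compiled graph after the first trace, which usually reduces Python overhead in the training loops.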
@@ -98,11 +98,13 @@
"b = tf.Variable(tf.zeros([num_classes]), name=\"bias\")\n",
"\n",
"# Logistic regression (Wx + b).\n",
"@tf.function\n",
"def logistic_regression(x):\n",
" # Apply softmax to normalize the logits to a probability distribution.\n",
" return tf.nn.softmax(tf.matmul(x, W) + b)\n",
"\n",
"# Cross-Entropy loss function.\n",
"@tf.function\n",
"def cross_entropy(y_pred, y_true):\n",
" # Encode label to a one hot vector.\n",
" y_true = tf.one_hot(y_true, depth=num_classes)\n",
@@ -112,6 +114,7 @@
" return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred),1))\n",
"\n",
"# Accuracy metric.\n",
"@tf.function\n",
"def accuracy(y_pred, y_true):\n",
" # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n",
" correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n",
@@ -128,6 +131,7 @@
"outputs": [],
"source": [
"# Optimization process. \n",
"@tf.function\n",
"def run_optimization(x, y):\n",
" # Wrap computation inside a GradientTape for automatic differentiation.\n",
" with tf.GradientTape() as g:\n",
2 changes: 2 additions & 0 deletions tensorflow_v2/notebooks/2_BasicModels/word2vec.ipynb
@@ -186,6 +186,7 @@
" x_embed = tf.nn.embedding_lookup(embedding, x)\n",
" return x_embed\n",
"\n",
"@tf.function\n",
"def nce_loss(x_embed, y):\n",
" with tf.device('/cpu:0'):\n",
" # Compute the average NCE loss for the batch.\n",
@@ -200,6 +201,7 @@
" return loss\n",
"\n",
"# Evaluation.\n",
"@tf.function.\n",
"def evaluate(x_embed):\n",
" with tf.device('/cpu:0'):\n",
" # Compute the cosine similarity between input data embedding and every embedding vectors\n",
4 changes: 4 additions & 0 deletions tensorflow_v2/notebooks/3_NeuralNetworks/autoencoder.ipynb
@@ -129,6 +129,7 @@
"outputs": [],
"source": [
"# Building the encoder.\n",
"@tf.function\n",
"def encoder(x):\n",
" # Encoder Hidden layer with sigmoid activation.\n",
" layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),\n",
@@ -140,6 +141,7 @@
"\n",
"\n",
"# Building the decoder.\n",
"@tf.function\n",
"def decoder(x):\n",
" # Decoder Hidden layer with sigmoid activation.\n",
" layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),\n",
@@ -157,6 +159,7 @@
"outputs": [],
"source": [
"# Mean square loss between original images and reconstructed ones.\n",
"@tf.function\n",
"def mean_square(reconstructed, original):\n",
" return tf.reduce_mean(tf.pow(original - reconstructed, 2))\n",
"\n",
@@ -171,6 +174,7 @@
"outputs": [],
"source": [
"# Optimization process. \n",
"@tf.function\n",
"def run_optimization(x):\n",
" # Wrap computation inside a GradientTape for automatic differentiation.\n",
" with tf.GradientTape() as g:\n",
@@ -119,6 +119,7 @@
" self.out = layers.Dense(num_classes)\n",
"\n",
" # Set forward pass.\n",
" @tf.function\n",
" def call(self, x, is_training=False):\n",
" x = self.bi_lstm(x)\n",
" x = self.out(x)\n",
@@ -140,6 +141,7 @@
"source": [
"# Cross-Entropy Loss.\n",
"# Note that this will apply 'softmax' to the logits.\n",
"@tf.function\n",
"def cross_entropy_loss(x, y):\n",
" # Convert labels to int 64 for tf cross-entropy function.\n",
" y = tf.cast(y, tf.int64)\n",
@@ -149,6 +151,7 @@
" return tf.reduce_mean(loss)\n",
"\n",
"# Accuracy metric.\n",
"@tf.function\n",
"def accuracy(y_pred, y_true):\n",
" # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n",
" correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n",
@@ -165,6 +168,7 @@
"outputs": [],
"source": [
"# Optimization process. \n",
"@tf.function\n",
"def run_optimization(x, y):\n",
" # Wrap computation inside a GradientTape for automatic differentiation.\n",
" with tf.GradientTape() as g:\n",
@@ -126,6 +126,7 @@
" self.out = layers.Dense(num_classes)\n",
"\n",
" # Set forward pass.\n",
" @tf.function\n",
" def call(self, x, is_training=False):\n",
" x = tf.reshape(x, [-1, 28, 28, 1])\n",
" x = self.conv1(x)\n",
@@ -154,6 +155,7 @@
"source": [
"# Cross-Entropy Loss.\n",
"# Note that this will apply 'softmax' to the logits.\n",
"@tf.function\n",
"def cross_entropy_loss(x, y):\n",
" # Convert labels to int 64 for tf cross-entropy function.\n",
" y = tf.cast(y, tf.int64)\n",
@@ -163,6 +165,7 @@
" return tf.reduce_mean(loss)\n",
"\n",
"# Accuracy metric.\n",
"@tf.function\n",
"def accuracy(y_pred, y_true):\n",
" # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n",
" correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n",
@@ -179,6 +182,7 @@
"outputs": [],
"source": [
"# Optimization process. \n",
"@tf.function\n",
"def run_optimization(x, y):\n",
" # Wrap computation inside a GradientTape for automatic differentiation.\n",
" with tf.GradientTape() as g:\n",
3 changes: 3 additions & 0 deletions tensorflow_v2/notebooks/3_NeuralNetworks/dcgan.ipynb
@@ -113,6 +113,7 @@
" self.conv2tr2 = layers.Conv2DTranspose(1, 5, strides=2, padding='SAME')\n",
"\n",
" # Set forward pass.\n",
" @tf.function\n",
" def call(self, x, is_training=False):\n",
" x = self.fc1(x)\n",
" x = self.bn1(x, training=is_training)\n",
@@ -147,6 +148,7 @@
" self.fc2 = layers.Dense(2)\n",
"\n",
" # Set forward pass.\n",
" @tf.function\n",
" def call(self, x, is_training=False):\n",
" x = tf.reshape(x, [-1, 28, 28, 1])\n",
" x = self.conv1(x)\n",
@@ -197,6 +199,7 @@
"outputs": [],
"source": [
"# Optimization process. Inputs: real image and noise.\n",
"@tf.function\n",
"def run_optimization(real_images):\n",
" \n",
" # Rescale to [-1, 1], the input range of the discriminator\n",
4 changes: 4 additions & 0 deletions tensorflow_v2/notebooks/3_NeuralNetworks/dynamic_rnn.ipynb
@@ -141,6 +141,7 @@
" self.out = layers.Dense(num_classes)\n",
"\n",
" # Set forward pass.\n",
" @tf.function\n",
" def call(self, x, is_training=False):\n",
" # A RNN Layer expects a 3-dim input (batch_size, seq_len, num_features).\n",
" x = tf.reshape(x, shape=[-1, seq_max_len, 1])\n",
@@ -168,6 +169,7 @@
"source": [
"# Cross-Entropy Loss.\n",
"# Note that this will apply 'softmax' to the logits.\n",
"@tf.function\n",
"def cross_entropy_loss(x, y):\n",
" # Convert labels to int 64 for tf cross-entropy function.\n",
" y = tf.cast(y, tf.int64)\n",
@@ -177,6 +179,7 @@
" return tf.reduce_mean(loss)\n",
"\n",
"# Accuracy metric.\n",
"@tf.function\n",
"def accuracy(y_pred, y_true):\n",
" # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n",
" correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n",
@@ -193,6 +196,7 @@
"outputs": [],
"source": [
"# Optimization process. \n",
"@tf.function\n",
"def run_optimization(x, y):\n",
" # Wrap computation inside a GradientTape for automatic differentiation.\n",
" with tf.GradientTape() as g:\n",
4 changes: 4 additions & 0 deletions tensorflow_v2/notebooks/3_NeuralNetworks/neural_network.ipynb
@@ -114,6 +114,7 @@
" self.out = layers.Dense(num_classes)\n",
"\n",
" # Set forward pass.\n",
" @tf.function\n",
" def call(self, x, is_training=False):\n",
" x = self.fc1(x)\n",
" x = self.fc2(x)\n",
@@ -136,6 +137,7 @@
"source": [
"# Cross-Entropy Loss.\n",
"# Note that this will apply 'softmax' to the logits.\n",
"@tf.function\n",
"def cross_entropy_loss(x, y):\n",
" # Convert labels to int 64 for tf cross-entropy function.\n",
" y = tf.cast(y, tf.int64)\n",
@@ -145,6 +147,7 @@
" return tf.reduce_mean(loss)\n",
"\n",
"# Accuracy metric.\n",
"@tf.function\n",
"def accuracy(y_pred, y_true):\n",
" # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n",
" correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n",
@@ -161,6 +164,7 @@
"outputs": [],
"source": [
"# Optimization process. \n",
"@tf.function\n",
"def run_optimization(x, y):\n",
" # Wrap computation inside a GradientTape for automatic differentiation.\n",
" with tf.GradientTape() as g:\n",
@@ -115,6 +115,7 @@
" self.out = layers.Dense(num_classes)\n",
"\n",
" # Set forward pass.\n",
" @tf.function\n",
" def call(self, x, is_training=False):\n",
" # LSTM layer.\n",
" x = self.lstm_layer(x)\n",
@@ -138,6 +139,7 @@
"source": [
"# Cross-Entropy Loss.\n",
"# Note that this will apply 'softmax' to the logits.\n",
"@tf.function\n",
"def cross_entropy_loss(x, y):\n",
" # Convert labels to int 64 for tf cross-entropy function.\n",
" y = tf.cast(y, tf.int64)\n",
@@ -147,6 +149,7 @@
" return tf.reduce_mean(loss)\n",
"\n",
"# Accuracy metric.\n",
"@tf.function\n",
"def accuracy(y_pred, y_true):\n",
" # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n",
" correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n",
@@ -163,6 +166,7 @@
"outputs": [],
"source": [
"# Optimization process. \n",
"@tf.function\n",
"def run_optimization(x, y):\n",
" # Wrap computation inside a GradientTape for automatic differentiation.\n",
" with tf.GradientTape() as g:\n",
5 changes: 5 additions & 0 deletions tensorflow_v2/notebooks/4_Utils/build_custom_layers.ipynb
@@ -108,6 +108,7 @@
" # Make sure to call the `build` method at the end\n",
" super(CustomLayer1, self).build(input_shape)\n",
"\n",
" @tf.function\n",
" def call(self, inputs):\n",
" x = tf.matmul(inputs, self.weight)\n",
" x = x + self.bias\n",
@@ -188,6 +189,7 @@
" self.out = layers.Dense(num_classes, activation=tf.nn.softmax)\n",
"\n",
" # Set forward pass.\n",
" @tf.function\n",
" def __call__(self, x, is_training=False):\n",
" x = self.layer1(x)\n",
" x = tf.nn.relu(x)\n",
@@ -209,12 +211,14 @@
"outputs": [],
"source": [
"# Cross-Entropy loss function.\n",
"@tf.function\n",
"def cross_entropy(y_pred, y_true):\n",
" y_true = tf.cast(y_true, tf.int64)\n",
" crossentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n",
" return tf.reduce_mean(crossentropy)\n",
"\n",
"# Accuracy metric.\n",
"@tf.function\n",
"def accuracy(y_pred, y_true):\n",
" # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n",
" correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n",
@@ -231,6 +235,7 @@
"outputs": [],
"source": [
"# Optimization process. \n",
"@tf.function\n",
"def run_optimization(x, y):\n",
" # Wrap computation inside a GradientTape for automatic differentiation.\n",
" with tf.GradientTape() as g:\n",
8 changes: 8 additions & 0 deletions tensorflow_v2/notebooks/4_Utils/save_restore_model.ipynb
@@ -95,11 +95,13 @@
"b = tf.Variable(tf.zeros([num_classes]), name=\"bias\")\n",
"\n",
"# Logistic regression (Wx + b).\n",
"@tf.function\n",
"def logistic_regression(x):\n",
" # Apply softmax to normalize the logits to a probability distribution.\n",
" return tf.nn.softmax(tf.matmul(x, W) + b)\n",
"\n",
"# Cross-Entropy loss function.\n",
"@tf.function\n",
"def cross_entropy(y_pred, y_true):\n",
" # Encode label to a one hot vector.\n",
" y_true = tf.one_hot(y_true, depth=num_classes)\n",
@@ -109,6 +111,7 @@
" return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred)))\n",
"\n",
"# Accuracy metric.\n",
"@tf.function\n",
"def accuracy(y_pred, y_true):\n",
" # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n",
" correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n",
@@ -125,6 +128,7 @@
"outputs": [],
"source": [
"# Optimization process. \n",
"@tf.function\n",
"def run_optimization(x, y):\n",
" # Wrap computation inside a GradientTape for automatic differentiation.\n",
" with tf.GradientTape() as g:\n",
@@ -380,6 +384,7 @@
" self.out = layers.Dense(num_classes, activation=tf.nn.softmax)\n",
"\n",
" # Set forward pass.\n",
" @tf.function\n",
" def __call__(self, x, is_training=False):\n",
" x = self.fc1(x)\n",
" x = self.out(x)\n",
@@ -400,12 +405,14 @@
"outputs": [],
"source": [
"# Cross-Entropy loss function.\n",
"@tf.function\n",
"def cross_entropy(y_pred, y_true):\n",
" y_true = tf.cast(y_true, tf.int64)\n",
" crossentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n",
" return tf.reduce_mean(crossentropy)\n",
"\n",
"# Accuracy metric.\n",
"@tf.function\n",
"def accuracy(y_pred, y_true):\n",
" # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n",
" correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n",
@@ -422,6 +429,7 @@
"outputs": [],
"source": [
"# Optimization process. \n",
"@tf.function\n",
"def run_optimization(x, y):\n",
" # Wrap computation inside a GradientTape for automatic differentiation.\n",
" with tf.GradientTape() as g:\n",
3 changes: 3 additions & 0 deletions tensorflow_v2/notebooks/4_Utils/tensorboard.ipynb
@@ -136,6 +136,7 @@
"outputs": [],
"source": [
"# Cross-Entropy loss function.\n",
"@tf.function\n",
"def cross_entropy(y_pred, y_true):\n",
" with tf.name_scope('CrossEntropyLoss'):\n",
" # Encode label to a one hot vector.\n",
@@ -146,6 +147,7 @@
" return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred)))\n",
"\n",
"# Accuracy metric.\n",
"@tf.function\n",
"def accuracy(y_pred, y_true):\n",
" with tf.name_scope('Accuracy'):\n",
" # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n",
@@ -164,6 +166,7 @@
"outputs": [],
"source": [
"# Optimization process. \n",
"@tf.function\n",
"def run_optimization(x, y):\n",
" # Wrap computation inside a GradientTape for automatic differentiation.\n",
" with tf.GradientTape() as g:\n",
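Closing note, outside the diff: the training loops that call these decorated steps are not shown in the hunks above. Below is a minimal sketch of how a `@tf.function`-decorated optimization step is typically driven, using illustrative names and random data rather than the notebooks' MNIST pipeline.

```python
import tensorflow as tf

W = tf.Variable(tf.ones([784, 10]), name="weight")
b = tf.Variable(tf.zeros([10]), name="bias")
optimizer = tf.optimizers.SGD(learning_rate=0.01)

@tf.function
def run_optimization(x, y):
    # The tape, gradient computation, and update all run as one traced graph.
    with tf.GradientTape() as g:
        logits = tf.matmul(x, W) + b
        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
    gradients = g.gradient(loss, [W, b])
    optimizer.apply_gradients(zip(gradients, [W, b]))
    return loss

# The first call traces the function; subsequent calls reuse the graph.
for step in range(100):
    batch_x = tf.random.normal([32, 784])
    batch_y = tf.random.uniform([32], maxval=10, dtype=tf.int64)
    loss = run_optimization(batch_x, batch_y)
```

Variables and the optimizer are created outside the decorated function, matching the notebooks, since `tf.function` expects variables to be created only on the first trace.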