From 4f476646aa8899eb0b269d0f7d54448bd31a6150 Mon Sep 17 00:00:00 2001
From: Yash Katariya
Date: Wed, 17 Jun 2020 13:55:01 -0700
Subject: [PATCH] Remove \n, ' ', and '' again (#101)

* Remove \n, ' ', and '' again

* Use if not lines[0].strip
---
 scripts/generate_tf_guides.py                 | 12 +++++--
 tf/custom_callback.ipynb                      |  9 ++----
 tf/custom_layers_and_models.ipynb             | 32 ++++---------------
 tf/customizing_what_happens_in_fit.ipynb      | 10 ++----
 tf/functional.ipynb                           |  3 --
 tf/masking_and_padding.ipynb                  | 11 ++-----
 tf/rnn.ipynb                                  | 13 +++-----
 tf/save_and_serialize.ipynb                   |  8 -----
 tf/train_and_evaluate.ipynb                   | 17 ++--------
 tf/writing_a_training_loop_from_scratch.ipynb | 19 +++--------
 10 files changed, 34 insertions(+), 100 deletions(-)

diff --git a/scripts/generate_tf_guides.py b/scripts/generate_tf_guides.py
index bf0b14f6c9..5049c98ea7 100644
--- a/scripts/generate_tf_guides.py
+++ b/scripts/generate_tf_guides.py
@@ -136,9 +136,8 @@
 
 
 def generate_single_tf_guide(source_dir, target_dir, title, source_name, target_name):
-    f = open(Path(source_dir) / (source_name + ".ipynb"))
-    original_ipynb = json.loads(f.read())
-    f.close()
+    nb = (Path(source_dir) / source_name).with_suffix(".ipynb")
+    original_ipynb = json.loads(nb.read_text())
 
     # Skip first title cell
     cells = original_ipynb["cells"][1:]
@@ -164,6 +163,13 @@ def generate_single_tf_guide(source_dir, target_dir, title, source_name, target_
             if len(lines) < 2:
                 new_lines.append(lines[-1])
             cell["source"] = new_lines
+        elif cell["cell_type"] == "code":
+            lines = cell["source"]
+            if not lines[0].strip():
+                lines = lines[1:]
+            if not lines[-1].strip():
+                lines = lines[:-1]
+            cell["source"] = lines
 
     # Add header cells
     header_cells = copy.deepcopy(TF_IPYNB_CELLS_TEMPLATE)
diff --git a/tf/custom_callback.ipynb b/tf/custom_callback.ipynb
index 2175ce0857..7a2c61ac82 100644
--- a/tf/custom_callback.ipynb
+++ b/tf/custom_callback.ipynb
@@ -203,8 +203,7 @@
     "        loss=\"mean_squared_error\",\n",
     "        metrics=[\"mean_absolute_error\"],\n",
     "    )\n",
-    "    return model\n",
-    ""
+    "    return model\n"
    ]
   },
   {
@@ -263,7 +262,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomCallback(keras.callbacks.Callback):\n",
     "    def on_train_begin(self, logs=None):\n",
     "        keys = list(logs.keys())\n",
@@ -319,8 +317,7 @@
     "\n",
     "    def on_predict_batch_end(self, batch, logs=None):\n",
     "        keys = list(logs.keys())\n",
-    "        print(\"...Predicting: end of batch {}; got log keys: {}\".format(batch, keys))\n",
-    ""
+    "        print(\"...Predicting: end of batch {}; got log keys: {}\".format(batch, keys))\n"
    ]
   },
   {
@@ -381,7 +378,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class LossAndErrorPrintingCallback(keras.callbacks.Callback):\n",
     "    def on_train_batch_end(self, batch, logs=None):\n",
     "        print(\"For batch {}, loss is {:7.2f}.\".format(batch, logs[\"loss\"]))\n",
@@ -562,7 +558,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomLearningRateScheduler(keras.callbacks.Callback):\n",
     "    \"\"\"Learning rate scheduler which sets the learning rate according to schedule.\n",
     "\n",
diff --git a/tf/custom_layers_and_models.ipynb b/tf/custom_layers_and_models.ipynb
index 9ce466b235..83773b641d 100644
--- a/tf/custom_layers_and_models.ipynb
+++ b/tf/custom_layers_and_models.ipynb
@@ -115,7 +115,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class Linear(keras.layers.Layer):\n",
     "    def __init__(self, units=32, input_dim=32):\n",
     "        super(Linear, self).__init__()\n",
@@ -130,8 +129,7 @@
     "        )\n",
     "\n",
     "    def call(self, inputs):\n",
-    "        return tf.matmul(inputs, self.w) + self.b\n",
-    ""
+    "        return tf.matmul(inputs, self.w) + self.b\n"
    ]
   },
   {
@@ -203,7 +201,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class Linear(keras.layers.Layer):\n",
     "    def __init__(self, units=32, input_dim=32):\n",
     "        super(Linear, self).__init__()\n",
@@ -247,7 +244,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class ComputeSum(keras.layers.Layer):\n",
     "    def __init__(self, input_dim):\n",
     "        super(ComputeSum, self).__init__()\n",
@@ -314,7 +310,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class Linear(keras.layers.Layer):\n",
     "    def __init__(self, units=32, input_dim=32):\n",
     "        super(Linear, self).__init__()\n",
@@ -324,8 +319,7 @@
     "        self.b = self.add_weight(shape=(units,), initializer=\"zeros\", trainable=True)\n",
     "\n",
     "    def call(self, inputs):\n",
-    "        return tf.matmul(inputs, self.w) + self.b\n",
-    ""
+    "        return tf.matmul(inputs, self.w) + self.b\n"
    ]
   },
   {
@@ -352,7 +346,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class Linear(keras.layers.Layer):\n",
     "    def __init__(self, units=32):\n",
     "        super(Linear, self).__init__()\n",
@@ -369,8 +362,7 @@
     "        )\n",
     "\n",
     "    def call(self, inputs):\n",
-    "        return tf.matmul(inputs, self.w) + self.b\n",
-    ""
+    "        return tf.matmul(inputs, self.w) + self.b\n"
    ]
   },
   {
@@ -482,8 +474,7 @@
     "\n",
     "    def call(self, inputs):\n",
     "        self.add_loss(self.rate * tf.reduce_sum(inputs))\n",
-    "        return inputs\n",
-    ""
+    "        return inputs\n"
    ]
   },
   {
@@ -508,7 +499,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class OuterLayer(keras.layers.Layer):\n",
     "    def __init__(self):\n",
     "        super(OuterLayer, self).__init__()\n",
@@ -549,7 +539,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class OuterLayerWithKernelRegularizer(keras.layers.Layer):\n",
     "    def __init__(self):\n",
     "        super(OuterLayerWithKernelRegularizer, self).__init__()\n",
@@ -666,7 +655,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class LogisticEndpoint(keras.layers.Layer):\n",
     "    def __init__(self, name=None):\n",
     "        super(LogisticEndpoint, self).__init__(name=name)\n",
@@ -685,8 +673,7 @@
     "        self.add_metric(acc, name=\"accuracy\")\n",
     "\n",
     "        # Return the inference-time prediction tensor (for `.predict()`).\n",
-    "        return tf.nn.softmax(logits)\n",
-    ""
+    "        return tf.nn.softmax(logits)\n"
    ]
   },
   {
@@ -775,7 +762,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class Linear(keras.layers.Layer):\n",
     "    def __init__(self, units=32):\n",
     "        super(Linear, self).__init__()\n",
@@ -827,7 +813,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class Linear(keras.layers.Layer):\n",
     "    def __init__(self, units=32, **kwargs):\n",
     "        super(Linear, self).__init__(**kwargs)\n",
@@ -906,7 +891,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomDropout(keras.layers.Layer):\n",
     "    def __init__(self, rate, **kwargs):\n",
     "        super(CustomDropout, self).__init__(**kwargs)\n",
@@ -915,8 +899,7 @@
     "    def call(self, inputs, training=None):\n",
     "        if training:\n",
     "            return tf.nn.dropout(inputs, rate=self.rate)\n",
-    "        return inputs\n",
-    ""
+    "        return inputs\n"
    ]
   },
   {
@@ -1122,8 +1105,7 @@
     "            z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1\n",
     "        )\n",
     "        self.add_loss(kl_loss)\n",
-    "        return reconstructed\n",
-    ""
+    "        return reconstructed\n"
    ]
   },
   {
diff --git a/tf/customizing_what_happens_in_fit.ipynb b/tf/customizing_what_happens_in_fit.ipynb
index 0fde25ed31..4b2c45acf7 100644
--- a/tf/customizing_what_happens_in_fit.ipynb
+++ b/tf/customizing_what_happens_in_fit.ipynb
@@ -170,7 +170,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomModel(keras.Model):\n",
     "    def train_step(self, data):\n",
     "        # Unpack the data. Its structure depends on your model and\n",
@@ -191,8 +190,7 @@
     "        # Update metrics (includes the metric that tracks the loss)\n",
     "        self.compiled_metrics.update_state(y, y_pred)\n",
     "        # Return a dict mapping metric names to current value\n",
-    "        return {m.name: m.result() for m in self.metrics}\n",
-    ""
+    "        return {m.name: m.result() for m in self.metrics}\n"
    ]
   },
   {
@@ -319,7 +317,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomModel(keras.Model):\n",
     "    def train_step(self, data):\n",
     "        # Unpack the data. Its structure depends on your model and\n",
@@ -391,7 +388,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomModel(keras.Model):\n",
     "    def test_step(self, data):\n",
     "        # Unpack the data\n",
@@ -504,7 +500,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class GAN(keras.Model):\n",
     "    def __init__(self, discriminator, generator, latent_dim):\n",
     "        super(GAN, self).__init__()\n",
@@ -560,8 +555,7 @@
     "            g_loss = self.loss_fn(misleading_labels, predictions)\n",
     "        grads = tape.gradient(g_loss, self.generator.trainable_weights)\n",
     "        self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))\n",
-    "        return {\"d_loss\": d_loss, \"g_loss\": g_loss}\n",
-    ""
+    "        return {\"d_loss\": d_loss, \"g_loss\": g_loss}\n"
    ]
   },
   {
diff --git a/tf/functional.ipynb b/tf/functional.ipynb
index 8ccbcda299..e3b41305d9 100644
--- a/tf/functional.ipynb
+++ b/tf/functional.ipynb
@@ -613,7 +613,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "def get_model():\n",
     "    inputs = keras.Input(shape=(128,))\n",
     "    outputs = layers.Dense(1)(inputs)\n",
@@ -1129,7 +1128,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomDense(layers.Layer):\n",
     "    def __init__(self, units=32):\n",
     "        super(CustomDense, self).__init__()\n",
@@ -1175,7 +1173,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomDense(layers.Layer):\n",
     "    def __init__(self, units=32):\n",
     "        super(CustomDense, self).__init__()\n",
diff --git a/tf/masking_and_padding.ipynb b/tf/masking_and_padding.ipynb
index 901eba54a1..49914bb693 100644
--- a/tf/masking_and_padding.ipynb
+++ b/tf/masking_and_padding.ipynb
@@ -177,8 +177,7 @@
     "padded_inputs = tf.keras.preprocessing.sequence.pad_sequences(\n",
     "    raw_inputs, padding=\"post\"\n",
     ")\n",
-    "print(padded_inputs)\n",
-    ""
+    "print(padded_inputs)\n"
    ]
   },
   {
@@ -346,7 +345,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class MyLayer(layers.Layer):\n",
     "    def __init__(self, **kwargs):\n",
     "        super(MyLayer, self).__init__(**kwargs)\n",
@@ -409,7 +407,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class TemporalSplit(keras.layers.Layer):\n",
     "    \"\"\"Split the input tensor into 2 tensors along the time dimension.\"\"\"\n",
     "\n",
@@ -450,7 +447,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomEmbedding(keras.layers.Layer):\n",
     "    def __init__(self, input_dim, output_dim, mask_zero=False, **kwargs):\n",
     "        super(CustomEmbedding, self).__init__(**kwargs)\n",
@@ -516,7 +512,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class MyActivation(keras.layers.Layer):\n",
     "    def __init__(self, **kwargs):\n",
     "        super(MyActivation, self).__init__(**kwargs)\n",
@@ -524,8 +519,7 @@
     "        self.supports_masking = True\n",
     "\n",
     "    def call(self, inputs):\n",
-    "        return tf.nn.relu(inputs)\n",
-    ""
+    "        return tf.nn.relu(inputs)\n"
    ]
   },
   {
@@ -587,7 +581,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class TemporalSoftmax(keras.layers.Layer):\n",
     "    def call(self, inputs, mask=None):\n",
     "        broadcast_float_mask = tf.expand_dims(tf.cast(mask, \"float32\"), -1)\n",
diff --git a/tf/rnn.ipynb b/tf/rnn.ipynb
index 149985c9fa..32942e2698 100644
--- a/tf/rnn.ipynb
+++ b/tf/rnn.ipynb
@@ -412,8 +412,7 @@
     "\n",
     "# reset_states() will reset the cached state to the original initial_state.\n",
     "# If no initial_state was provided, zero-states will be used by default.\n",
-    "lstm_layer.reset_states()\n",
-    ""
+    "lstm_layer.reset_states()\n"
    ]
   },
   {
@@ -465,8 +464,7 @@
     "existing_state = lstm_layer.states\n",
     "\n",
     "new_lstm_layer = layers.LSTM(64)\n",
-    "new_output = new_lstm_layer(paragraph3, initial_state=existing_state)\n",
-    ""
+    "new_output = new_lstm_layer(paragraph3, initial_state=existing_state)\n"
    ]
   },
   {
@@ -610,8 +608,7 @@
     "            keras.layers.Dense(output_size),\n",
     "        ]\n",
     "    )\n",
-    "    return model\n",
-    ""
+    "    return model\n"
    ]
   },
   {
@@ -803,7 +800,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class NestedCell(keras.layers.Layer):\n",
     "    def __init__(self, unit_1, unit_2, unit_3, **kwargs):\n",
     "        self.unit_1 = unit_1\n",
@@ -845,8 +841,7 @@
     "        return output, new_states\n",
     "\n",
     "    def get_config(self):\n",
-    "        return {\"unit_1\": self.unit_1, \"unit_2\": self.unit_2, \"unit_3\": self.unit_3}\n",
-    ""
+    "        return {\"unit_1\": self.unit_1, \"unit_2\": self.unit_2, \"unit_3\": self.unit_3}\n"
    ]
   },
   {
diff --git a/tf/save_and_serialize.ipynb b/tf/save_and_serialize.ipynb
index 581886a435..0913f14224 100644
--- a/tf/save_and_serialize.ipynb
+++ b/tf/save_and_serialize.ipynb
@@ -202,7 +202,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "def get_model():\n",
     "    # Create a simple model.\n",
     "    inputs = keras.Input(shape=(32,))\n",
@@ -305,7 +304,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomModel(keras.Model):\n",
     "    def __init__(self, hidden_units):\n",
     "        super(CustomModel, self).__init__()\n",
@@ -669,7 +667,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomLayer(keras.layers.Layer):\n",
     "    def __init__(self, a):\n",
     "        self.var = tf.Variable(a, name=\"var_a\")\n",
@@ -746,7 +743,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomLayer(keras.layers.Layer):\n",
     "    def __init__(self, units=32, **kwargs):\n",
     "        super(CustomLayer, self).__init__(**kwargs)\n",
@@ -868,7 +864,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "def create_layer():\n",
     "    layer = keras.layers.Dense(64, activation=\"relu\", name=\"dense_2\")\n",
     "    layer.build((None, 784))\n",
@@ -1080,7 +1075,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomLayer(keras.layers.Layer):\n",
     "    def __init__(self, a):\n",
     "        self.var = tf.Variable(a, name=\"var_a\")\n",
@@ -1301,7 +1295,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class NestedDenseLayer(keras.layers.Layer):\n",
     "    def __init__(self, units, name=None):\n",
     "        super(NestedDenseLayer, self).__init__(name=name)\n",
@@ -1349,7 +1342,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "def create_functional_model():\n",
     "    inputs = keras.Input(shape=(784,), name=\"digits\")\n",
     "    x = keras.layers.Dense(64, activation=\"relu\", name=\"dense_1\")(inputs)\n",
diff --git a/tf/train_and_evaluate.ipynb b/tf/train_and_evaluate.ipynb
index 36db45ce7b..4ac7a5b863 100644
--- a/tf/train_and_evaluate.ipynb
+++ b/tf/train_and_evaluate.ipynb
@@ -408,7 +408,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "def get_uncompiled_model():\n",
     "    inputs = keras.Input(shape=(784,), name=\"digits\")\n",
     "    x = layers.Dense(64, activation=\"relu\", name=\"dense_1\")(inputs)\n",
@@ -425,8 +424,7 @@
     "        loss=\"sparse_categorical_crossentropy\",\n",
     "        metrics=[\"sparse_categorical_accuracy\"],\n",
     "    )\n",
-    "    return model\n",
-    ""
+    "    return model\n"
    ]
   },
   {
@@ -487,7 +485,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "def custom_mean_squared_error(y_true, y_pred):\n",
     "    return tf.math.reduce_mean(tf.square(y_true - y_pred))\n",
     "\n",
@@ -532,7 +529,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CustomMSE(keras.losses.Loss):\n",
     "    def __init__(self, regularization_factor=0.1, name=\"custom_mse\"):\n",
     "        super().__init__(name=name)\n",
@@ -587,7 +583,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class CategoricalTruePositives(keras.metrics.Metric):\n",
     "    def __init__(self, name=\"categorical_true_positives\", **kwargs):\n",
     "        super(CategoricalTruePositives, self).__init__(name=name, **kwargs)\n",
@@ -649,7 +644,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class ActivityRegularizationLayer(layers.Layer):\n",
     "    def call(self, inputs):\n",
     "        self.add_loss(tf.reduce_sum(inputs) * 0.1)\n",
@@ -695,7 +689,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class MetricLoggingLayer(layers.Layer):\n",
     "    def call(self, inputs):\n",
     "        # The `aggregation` argument defines\n",
@@ -789,7 +782,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class LogisticEndpoint(keras.layers.Layer):\n",
     "    def __init__(self, name=None):\n",
     "        super(LogisticEndpoint, self).__init__(name=name)\n",
@@ -808,8 +800,7 @@
     "        self.add_metric(acc, name=\"accuracy\")\n",
     "\n",
     "        # Return the inference-time prediction tensor (for `.predict()`).\n",
-    "        return tf.nn.softmax(logits)\n",
-    ""
+    "        return tf.nn.softmax(logits)\n"
    ]
   },
   {
@@ -1733,14 +1724,12 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class LossHistory(keras.callbacks.Callback):\n",
     "    def on_train_begin(self, logs):\n",
     "        self.per_batch_losses = []\n",
     "\n",
     "    def on_batch_end(self, batch, logs):\n",
-    "        self.per_batch_losses.append(logs.get(\"loss\"))\n",
-    ""
+    "        self.per_batch_losses.append(logs.get(\"loss\"))\n"
    ]
   },
   {
diff --git a/tf/writing_a_training_loop_from_scratch.ipynb b/tf/writing_a_training_loop_from_scratch.ipynb
index 62e50eed6b..b0aaa8d7ea 100644
--- a/tf/writing_a_training_loop_from_scratch.ipynb
+++ b/tf/writing_a_training_loop_from_scratch.ipynb
@@ -408,7 +408,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "@tf.function\n",
     "def train_step(x, y):\n",
     "    with tf.GradientTape() as tape:\n",
@@ -417,8 +416,7 @@
     "    grads = tape.gradient(loss_value, model.trainable_weights)\n",
     "    optimizer.apply_gradients(zip(grads, model.trainable_weights))\n",
     "    train_acc_metric.update_state(y, logits)\n",
-    "    return loss_value\n",
-    ""
+    "    return loss_value\n"
    ]
   },
   {
@@ -440,12 +438,10 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "@tf.function\n",
     "def test_step(x, y):\n",
     "    val_logits = model(x, training=False)\n",
-    "    val_acc_metric.update_state(y, val_logits)\n",
-    ""
+    "    val_acc_metric.update_state(y, val_logits)\n"
    ]
   },
   {
@@ -542,12 +538,10 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "class ActivityRegularizationLayer(layers.Layer):\n",
     "    def call(self, inputs):\n",
     "        self.add_loss(1e-2 * tf.reduce_sum(inputs))\n",
-    "        return inputs\n",
-    ""
+    "        return inputs\n"
    ]
   },
   {
@@ -598,7 +592,6 @@
    },
    "outputs": [],
    "source": [
-    "\n",
     "@tf.function\n",
     "def train_step(x, y):\n",
     "    with tf.GradientTape() as tape:\n",
@@ -609,8 +602,7 @@
     "    grads = tape.gradient(loss_value, model.trainable_weights)\n",
     "    optimizer.apply_gradients(zip(grads, model.trainable_weights))\n",
     "    train_acc_metric.update_state(y, logits)\n",
-    "    return loss_value\n",
-    ""
+    "    return loss_value\n"
    ]
   },
   {
@@ -796,8 +788,7 @@
     "        g_loss = loss_fn(misleading_labels, predictions)\n",
     "    grads = tape.gradient(g_loss, generator.trainable_weights)\n",
     "    g_optimizer.apply_gradients(zip(grads, generator.trainable_weights))\n",
-    "    return d_loss, g_loss, generated_images\n",
-    ""
+    "    return d_loss, g_loss, generated_images\n"
    ]
   },
   {