
Commit

Remove \n, ' ', and '' again (keras-team#101)
* Remove \n, ' ', and '' again

* Use if not lines[0].strip
yashk2810 authored Jun 17, 2020
1 parent 7bc69cb commit 4f47664
Showing 10 changed files with 34 additions and 100 deletions.
12 changes: 9 additions & 3 deletions scripts/generate_tf_guides.py
@@ -136,9 +136,8 @@
 
 
 def generate_single_tf_guide(source_dir, target_dir, title, source_name, target_name):
-    f = open(Path(source_dir) / (source_name + ".ipynb"))
-    original_ipynb = json.loads(f.read())
-    f.close()
+    nb = (Path(source_dir) / source_name).with_suffix(".ipynb")
+    original_ipynb = json.loads(nb.read_text())
 
     # Skip first title cell
     cells = original_ipynb["cells"][1:]
@@ -164,6 +163,13 @@ def generate_single_tf_guide(source_dir, target_dir, title, source_name, target_name):
             if len(lines) < 2:
                 new_lines.append(lines[-1])
             cell["source"] = new_lines
+        elif cell["cell_type"] == "code":
+            lines = cell["source"]
+            if not lines[0].strip():
+                lines = lines[1:]
+            if not lines[-1].strip():
+                lines = lines[:-1]
+            cell["source"] = lines
 
     # Add header cells
     header_cells = copy.deepcopy(TF_IPYNB_CELLS_TEMPLATE)
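For context (not part of the commit): the new elif branch drops a single leading blank entry (such as "\n" or " ") and a single trailing blank entry (such as "") from each code cell's source list, which is what the notebook diffs below reflect. A minimal standalone sketch of that trimming, using a made-up sample cell:

# Hypothetical example; the real logic lives in generate_single_tf_guide()
# in scripts/generate_tf_guides.py.
cell = {"cell_type": "code", "source": ["\n", "x = 1\n", "print(x)\n", ""]}

lines = cell["source"]
if not lines[0].strip():   # leading "\n" (or " ") is empty once stripped
    lines = lines[1:]
if not lines[-1].strip():  # trailing "" is empty as well
    lines = lines[:-1]
cell["source"] = lines

print(cell["source"])  # ['x = 1\n', 'print(x)\n']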
9 changes: 2 additions & 7 deletions tf/custom_callback.ipynb
@@ -203,8 +203,7 @@
" loss=\"mean_squared_error\",\n",
" metrics=[\"mean_absolute_error\"],\n",
" )\n",
" return model\n",
""
" return model\n"
]
},
{
@@ -263,7 +262,6 @@
},
"outputs": [],
"source": [
"\n",
"class CustomCallback(keras.callbacks.Callback):\n",
" def on_train_begin(self, logs=None):\n",
" keys = list(logs.keys())\n",
@@ -319,8 +317,7 @@
"\n",
" def on_predict_batch_end(self, batch, logs=None):\n",
" keys = list(logs.keys())\n",
" print(\"...Predicting: end of batch {}; got log keys: {}\".format(batch, keys))\n",
""
" print(\"...Predicting: end of batch {}; got log keys: {}\".format(batch, keys))\n"
]
},
{
@@ -381,7 +378,6 @@
},
"outputs": [],
"source": [
"\n",
"class LossAndErrorPrintingCallback(keras.callbacks.Callback):\n",
" def on_train_batch_end(self, batch, logs=None):\n",
" print(\"For batch {}, loss is {:7.2f}.\".format(batch, logs[\"loss\"]))\n",
@@ -562,7 +558,6 @@
},
"outputs": [],
"source": [
"\n",
"class CustomLearningRateScheduler(keras.callbacks.Callback):\n",
" \"\"\"Learning rate scheduler which sets the learning rate according to schedule.\n",
"\n",
32 changes: 7 additions & 25 deletions tf/custom_layers_and_models.ipynb
@@ -115,7 +115,6 @@
},
"outputs": [],
"source": [
"\n",
"class Linear(keras.layers.Layer):\n",
" def __init__(self, units=32, input_dim=32):\n",
" super(Linear, self).__init__()\n",
@@ -130,8 +129,7 @@
" )\n",
"\n",
" def call(self, inputs):\n",
" return tf.matmul(inputs, self.w) + self.b\n",
""
" return tf.matmul(inputs, self.w) + self.b\n"
]
},
{
@@ -203,7 +201,6 @@
},
"outputs": [],
"source": [
"\n",
"class Linear(keras.layers.Layer):\n",
" def __init__(self, units=32, input_dim=32):\n",
" super(Linear, self).__init__()\n",
@@ -247,7 +244,6 @@
},
"outputs": [],
"source": [
"\n",
"class ComputeSum(keras.layers.Layer):\n",
" def __init__(self, input_dim):\n",
" super(ComputeSum, self).__init__()\n",
@@ -314,7 +310,6 @@
},
"outputs": [],
"source": [
"\n",
"class Linear(keras.layers.Layer):\n",
" def __init__(self, units=32, input_dim=32):\n",
" super(Linear, self).__init__()\n",
@@ -324,8 +319,7 @@
" self.b = self.add_weight(shape=(units,), initializer=\"zeros\", trainable=True)\n",
"\n",
" def call(self, inputs):\n",
" return tf.matmul(inputs, self.w) + self.b\n",
""
" return tf.matmul(inputs, self.w) + self.b\n"
]
},
{
@@ -352,7 +346,6 @@
},
"outputs": [],
"source": [
"\n",
"class Linear(keras.layers.Layer):\n",
" def __init__(self, units=32):\n",
" super(Linear, self).__init__()\n",
@@ -369,8 +362,7 @@
" )\n",
"\n",
" def call(self, inputs):\n",
" return tf.matmul(inputs, self.w) + self.b\n",
""
" return tf.matmul(inputs, self.w) + self.b\n"
]
},
{
@@ -482,8 +474,7 @@
"\n",
" def call(self, inputs):\n",
" self.add_loss(self.rate * tf.reduce_sum(inputs))\n",
" return inputs\n",
""
" return inputs\n"
]
},
{
@@ -508,7 +499,6 @@
},
"outputs": [],
"source": [
"\n",
"class OuterLayer(keras.layers.Layer):\n",
" def __init__(self):\n",
" super(OuterLayer, self).__init__()\n",
@@ -549,7 +539,6 @@
},
"outputs": [],
"source": [
"\n",
"class OuterLayerWithKernelRegularizer(keras.layers.Layer):\n",
" def __init__(self):\n",
" super(OuterLayerWithKernelRegularizer, self).__init__()\n",
@@ -666,7 +655,6 @@
},
"outputs": [],
"source": [
"\n",
"class LogisticEndpoint(keras.layers.Layer):\n",
" def __init__(self, name=None):\n",
" super(LogisticEndpoint, self).__init__(name=name)\n",
@@ -685,8 +673,7 @@
" self.add_metric(acc, name=\"accuracy\")\n",
"\n",
" # Return the inference-time prediction tensor (for `.predict()`).\n",
" return tf.nn.softmax(logits)\n",
""
" return tf.nn.softmax(logits)\n"
]
},
{
@@ -775,7 +762,6 @@
},
"outputs": [],
"source": [
"\n",
"class Linear(keras.layers.Layer):\n",
" def __init__(self, units=32):\n",
" super(Linear, self).__init__()\n",
@@ -827,7 +813,6 @@
},
"outputs": [],
"source": [
"\n",
"class Linear(keras.layers.Layer):\n",
" def __init__(self, units=32, **kwargs):\n",
" super(Linear, self).__init__(**kwargs)\n",
@@ -906,7 +891,6 @@
},
"outputs": [],
"source": [
"\n",
"class CustomDropout(keras.layers.Layer):\n",
" def __init__(self, rate, **kwargs):\n",
" super(CustomDropout, self).__init__(**kwargs)\n",
@@ -915,8 +899,7 @@
" def call(self, inputs, training=None):\n",
" if training:\n",
" return tf.nn.dropout(inputs, rate=self.rate)\n",
" return inputs\n",
""
" return inputs\n"
]
},
{
@@ -1122,8 +1105,7 @@
" z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1\n",
" )\n",
" self.add_loss(kl_loss)\n",
" return reconstructed\n",
""
" return reconstructed\n"
]
},
{
10 changes: 2 additions & 8 deletions tf/customizing_what_happens_in_fit.ipynb
@@ -170,7 +170,6 @@
},
"outputs": [],
"source": [
"\n",
"class CustomModel(keras.Model):\n",
" def train_step(self, data):\n",
" # Unpack the data. Its structure depends on your model and\n",
@@ -191,8 +190,7 @@
" # Update metrics (includes the metric that tracks the loss)\n",
" self.compiled_metrics.update_state(y, y_pred)\n",
" # Return a dict mapping metric names to current value\n",
" return {m.name: m.result() for m in self.metrics}\n",
""
" return {m.name: m.result() for m in self.metrics}\n"
]
},
{
@@ -319,7 +317,6 @@
},
"outputs": [],
"source": [
"\n",
"class CustomModel(keras.Model):\n",
" def train_step(self, data):\n",
" # Unpack the data. Its structure depends on your model and\n",
@@ -391,7 +388,6 @@
},
"outputs": [],
"source": [
"\n",
"class CustomModel(keras.Model):\n",
" def test_step(self, data):\n",
" # Unpack the data\n",
@@ -504,7 +500,6 @@
},
"outputs": [],
"source": [
"\n",
"class GAN(keras.Model):\n",
" def __init__(self, discriminator, generator, latent_dim):\n",
" super(GAN, self).__init__()\n",
@@ -560,8 +555,7 @@
" g_loss = self.loss_fn(misleading_labels, predictions)\n",
" grads = tape.gradient(g_loss, self.generator.trainable_weights)\n",
" self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))\n",
" return {\"d_loss\": d_loss, \"g_loss\": g_loss}\n",
""
" return {\"d_loss\": d_loss, \"g_loss\": g_loss}\n"
]
},
{
3 changes: 0 additions & 3 deletions tf/functional.ipynb
@@ -613,7 +613,6 @@
},
"outputs": [],
"source": [
"\n",
"def get_model():\n",
" inputs = keras.Input(shape=(128,))\n",
" outputs = layers.Dense(1)(inputs)\n",
@@ -1129,7 +1128,6 @@
},
"outputs": [],
"source": [
"\n",
"class CustomDense(layers.Layer):\n",
" def __init__(self, units=32):\n",
" super(CustomDense, self).__init__()\n",
@@ -1175,7 +1173,6 @@
},
"outputs": [],
"source": [
"\n",
"class CustomDense(layers.Layer):\n",
" def __init__(self, units=32):\n",
" super(CustomDense, self).__init__()\n",
11 changes: 2 additions & 9 deletions tf/masking_and_padding.ipynb
@@ -177,8 +177,7 @@
"padded_inputs = tf.keras.preprocessing.sequence.pad_sequences(\n",
" raw_inputs, padding=\"post\"\n",
")\n",
"print(padded_inputs)\n",
""
"print(padded_inputs)\n"
]
},
{
@@ -346,7 +345,6 @@
},
"outputs": [],
"source": [
"\n",
"class MyLayer(layers.Layer):\n",
" def __init__(self, **kwargs):\n",
" super(MyLayer, self).__init__(**kwargs)\n",
@@ -409,7 +407,6 @@
},
"outputs": [],
"source": [
"\n",
"class TemporalSplit(keras.layers.Layer):\n",
" \"\"\"Split the input tensor into 2 tensors along the time dimension.\"\"\"\n",
"\n",
@@ -450,7 +447,6 @@
},
"outputs": [],
"source": [
"\n",
"class CustomEmbedding(keras.layers.Layer):\n",
" def __init__(self, input_dim, output_dim, mask_zero=False, **kwargs):\n",
" super(CustomEmbedding, self).__init__(**kwargs)\n",
@@ -516,16 +512,14 @@
},
"outputs": [],
"source": [
"\n",
"class MyActivation(keras.layers.Layer):\n",
" def __init__(self, **kwargs):\n",
" super(MyActivation, self).__init__(**kwargs)\n",
" # Signal that the layer is safe for mask propagation\n",
" self.supports_masking = True\n",
"\n",
" def call(self, inputs):\n",
" return tf.nn.relu(inputs)\n",
""
" return tf.nn.relu(inputs)\n"
]
},
{
@@ -587,7 +581,6 @@
},
"outputs": [],
"source": [
"\n",
"class TemporalSoftmax(keras.layers.Layer):\n",
" def call(self, inputs, mask=None):\n",
" broadcast_float_mask = tf.expand_dims(tf.cast(mask, \"float32\"), -1)\n",
Diffs for the remaining 4 changed files are not shown.

0 comments on commit 4f47664
