Update files for researcher guide
fchollet committed Jul 19, 2020
1 parent e239c5e commit f8d5db9
Showing 2 changed files with 87 additions and 139 deletions.
70 changes: 22 additions & 48 deletions guides/ipynb/intro_to_keras_for_researchers.ipynb
@@ -32,8 +32,7 @@
"outputs": [],
"source": [
"import tensorflow as tf\n",
"from tensorflow import keras\n",
""
"from tensorflow import keras"
]
},
{
@@ -59,7 +58,7 @@
"- The Keras Functional API\n",
"\n",
"You will also see the Keras API in action in two end-to-end research examples:\n",
"a Variational Autoencoder, an a Hypernetwork."
"a Variational Autoencoder, and a Hypernetwork."
]
},
{
@@ -103,7 +102,6 @@
"\n",
" def call(self, inputs):\n",
" return tf.matmul(inputs, self.w) + self.b\n",
"\n",
""
]
},
@@ -130,8 +128,7 @@
"# The layer can be treated as a function.\n",
"# Here we call it on some data.\n",
"y = linear_layer(tf.ones((2, 2)))\n",
"assert y.shape == (2, 4)\n",
""
"assert y.shape == (2, 4)"
]
},
{
@@ -152,8 +149,7 @@
},
"outputs": [],
"source": [
"assert linear_layer.weights == [linear_layer.w, linear_layer.b]\n",
""
"assert linear_layer.weights == [linear_layer.w, linear_layer.b]"
]
},
{
@@ -212,8 +208,7 @@
"linear_layer = Linear(4)\n",
"\n",
"# This will also call `build(input_shape)` and create the weights.\n",
"y = linear_layer(tf.ones((2, 2)))\n",
""
"y = linear_layer(tf.ones((2, 2)))"
]
},
{
@@ -274,8 +269,7 @@
"\n",
" # Logging.\n",
" if step % 100 == 0:\n",
" print(\"Step:\", step, \"Loss:\", float(loss))\n",
""
" print(\"Step:\", step, \"Loss:\", float(loss))"
]
},
{
@@ -324,8 +318,7 @@
"\n",
"assert my_sum.weights == [my_sum.total]\n",
"assert my_sum.non_trainable_weights == [my_sum.total]\n",
"assert my_sum.trainable_weights == []\n",
""
"assert my_sum.trainable_weights == []"
]
},
{
@@ -376,8 +369,7 @@
"y = mlp(tf.ones(shape=(3, 64)))\n",
"\n",
"# Weights are recursively tracked.\n",
"assert len(mlp.weights) == 6\n",
""
"assert len(mlp.weights) == 6"
]
},
{
@@ -404,8 +396,7 @@
" keras.layers.Dense(32, activation=tf.nn.relu),\n",
" keras.layers.Dense(10),\n",
" ]\n",
")\n",
""
")"
]
},
{
@@ -444,7 +435,6 @@
" # that depends on the inputs.\n",
" self.add_loss(self.rate * tf.reduce_sum(inputs))\n",
" return inputs\n",
"\n",
""
]
},
@@ -487,8 +477,7 @@
"mlp = SparseMLP()\n",
"y = mlp(tf.ones((10, 10)))\n",
"\n",
"print(mlp.losses) # List containing one float32 scalar\n",
""
"print(mlp.losses) # List containing one float32 scalar"
]
},
{
@@ -500,8 +489,7 @@
"These losses are cleared by the top-level layer at the start of each forward\n",
"pass -- they don't accumulate. `layer.losses` always contains only the losses\n",
"created during the last forward pass. You would typically use these losses by\n",
"summing them before computing your gradients when writing a training loop.\n",
""
"summing them before computing your gradients when writing a training loop."
]
},
{
@@ -555,8 +543,7 @@
"\n",
" # Logging.\n",
" if step % 100 == 0:\n",
" print(\"Step:\", step, \"Loss:\", float(loss))\n",
""
" print(\"Step:\", step, \"Loss:\", float(loss))"
]
},
{
@@ -624,9 +611,8 @@
" print(\"Epoch:\", epoch, \"Step:\", step)\n",
" print(\"Total running accuracy so far: %.3f\" % accuracy.result())\n",
"\n",
" # Result the metric's state at the end of an epoch\n",
" accuracy.reset_states()\n",
""
" # Reset the metric's state at the end of an epoch\n",
" accuracy.reset_states()"
]
},
{
@@ -697,8 +683,7 @@
"for step, (x, y) in enumerate(dataset):\n",
" loss = train_on_batch(x, y)\n",
" if step % 100 == 0:\n",
" print(\"Step:\", step, \"Loss:\", float(loss))\n",
""
" print(\"Step:\", step, \"Loss:\", float(loss))"
]
},
{
@@ -755,8 +740,7 @@
"\n",
"mlp = MLPWithDropout()\n",
"y_train = mlp(tf.ones((2, 2)), training=True)\n",
"y_test = mlp(tf.ones((2, 2)), training=False)\n",
""
"y_test = mlp(tf.ones((2, 2)), training=False)"
]
},
{
@@ -808,8 +792,7 @@
"\n",
"# You can pass a `training` argument in `__call__`\n",
"# (it will get passed down to the Dropout layer).\n",
"y = model(tf.ones((2, 16)), training=True)\n",
""
"y = model(tf.ones((2, 16)), training=True)"
]
},
{
@@ -911,7 +894,6 @@
" z_log_var = self.dense_log_var(x)\n",
" z = self.sampling((z_mean, z_log_var))\n",
" return z_mean, z_log_var, z\n",
"\n",
""
]
},
@@ -945,7 +927,6 @@
" def call(self, inputs):\n",
" x = self.dense_proj(inputs)\n",
" return self.dense_output(x)\n",
"\n",
""
]
},
@@ -986,7 +967,6 @@
" )\n",
" self.add_loss(kl_loss)\n",
" return reconstructed\n",
"\n",
""
]
},
@@ -1048,8 +1028,7 @@
" # Training the model to convergence is left\n",
" # as an exercise to the reader.\n",
" if step >= 1000:\n",
" break\n",
""
" break"
]
},
{
@@ -1100,8 +1079,7 @@
"\n",
"# Add KL divergence regularization loss.\n",
"kl_loss = -0.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)\n",
"vae.add_loss(kl_loss)\n",
""
"vae.add_loss(kl_loss)"
]
},
{
@@ -1140,8 +1118,7 @@
"vae.compile(optimizer, loss=loss_fn)\n",
"\n",
"# Actually training the model.\n",
"vae.fit(dataset, epochs=1)\n",
""
"vae.fit(dataset, epochs=1)"
]
},
{
@@ -1173,7 +1150,6 @@
"\n",
"Let's implement a really trivial hypernetwork: we'll use a small 2-layer network to\n",
"generate the weights of a larger 3-layer network.\n",
"\n",
""
]
},
@@ -1210,8 +1186,7 @@
" keras.layers.Dense(16, activation=tf.nn.relu),\n",
" keras.layers.Dense(num_weights_to_generate, activation=tf.nn.sigmoid),\n",
" ]\n",
")\n",
""
")"
]
},
{
@@ -1309,8 +1284,7 @@
" # Training the model to convergence is left\n",
" # as an exercise to the reader.\n",
" if step >= 1000:\n",
" break\n",
""
" break"
]
},
{