diff --git a/chapter02_mathematical-building-blocks.ipynb b/chapter02_mathematical-building-blocks.ipynb index 7a9acbd0de..01edc9becc 100644 --- a/chapter02_mathematical-building-blocks.ipynb +++ b/chapter02_mathematical-building-blocks.ipynb @@ -383,7 +383,7 @@ "colab_type": "text" }, "source": [ - "### Rank-3 tensors and higher-rank tensors" + "### Rank-3 and higher-rank tensors" ] }, { @@ -1028,7 +1028,7 @@ "colab_type": "text" }, "source": [ - "### Chaining derivatives: the Backpropagation algorithm" + "### Chaining derivatives: The Backpropagation algorithm" ] }, { @@ -1055,7 +1055,7 @@ "colab_type": "text" }, "source": [ - "#### The Gradient Tape in TensorFlow" + "#### The gradient tape in TensorFlow" ] }, { @@ -1337,7 +1337,7 @@ "learning_rate = 1e-3\n", "\n", "def update_weights(gradients, weights):\n", - " for g, w in zip(gradients, model.weights):\n", + " for g, w in zip(gradients, weights):\n", " w.assign_sub(g * learning_rate)" ] }, @@ -1434,7 +1434,7 @@ "colab_type": "text" }, "source": [ - "## Chapter summary" + "## Summary" ] } ], diff --git a/chapter03_introduction-to-keras-and-tf.ipynb b/chapter03_introduction-to-keras-and-tf.ipynb index 8121203911..69240b5fb2 100644 --- a/chapter03_introduction-to-keras-and-tf.ipynb +++ b/chapter03_introduction-to-keras-and-tf.ipynb @@ -42,7 +42,7 @@ "colab_type": "text" }, "source": [ - "## Keras and TensorFlow: a brief history" + "## Keras and TensorFlow: A brief history" ] }, { @@ -60,7 +60,7 @@ "colab_type": "text" }, "source": [ - "### Jupyter notebooks: the preferred way to run deep-learning experiments" + "### Jupyter notebooks: The preferred way to run deep-learning experiments" ] }, { @@ -87,7 +87,7 @@ "colab_type": "text" }, "source": [ - "#### Installing packages with `pip`" + "#### Installing packages with pip" ] }, { @@ -114,7 +114,7 @@ "colab_type": "text" }, "source": [ - "#### Constant tensors and Variables" + "#### Constant tensors and variables" ] }, { @@ -212,7 +212,7 @@ "colab_type": "text" }, "source": [ - "**Creating a Variable**" + "**Creating a TensorFlow variable**" ] }, { @@ -233,7 +233,7 @@ "colab_type": "text" }, "source": [ - "**Assigning a value to a Variable**" + "**Assigning a value to a TensorFlow variable**" ] }, { @@ -253,7 +253,7 @@ "colab_type": "text" }, "source": [ - "**Assigning a value to a subset of a Variable**" + "**Assigning a value to a subset of a TensorFlow variable**" ] }, { @@ -273,7 +273,7 @@ "colab_type": "text" }, "source": [ - "**Using assign_add**" + "**Using `assign_add`**" ] }, { @@ -293,7 +293,7 @@ "colab_type": "text" }, "source": [ - "#### Tensor operations: doing math in TensorFlow" + "#### Tensor operations: Doing math in TensorFlow" ] }, { @@ -327,7 +327,7 @@ "colab_type": "text" }, "source": [ - "#### A second look at the `GradientTape` API" + "#### A second look at the GradientTape API" ] }, { @@ -336,7 +336,7 @@ "colab_type": "text" }, "source": [ - "**Using the GradientTape**" + "**Using the `GradientTape`**" ] }, { @@ -359,7 +359,7 @@ "colab_type": "text" }, "source": [ - "**Using the GradientTape with constant tensor inputs**" + "**Using `GradientTape` with constant tensor inputs**" ] }, { @@ -408,7 +408,7 @@ "colab_type": "text" }, "source": [ - "#### An end-to-end example: a linear classifier in pure TensorFlow" + "#### An end-to-end example: A linear classifier in pure TensorFlow" ] }, { @@ -430,9 +430,13 @@ "source": [ "num_samples_per_class = 1000\n", "negative_samples = np.random.multivariate_normal(\n", - " mean=[0, 3], cov=[[1, 0.5],[0.5, 1]], 
size=num_samples_per_class)\n", + " mean=[0, 3],\n", + " cov=[[1, 0.5],[0.5, 1]],\n", + " size=num_samples_per_class)\n", "positive_samples = np.random.multivariate_normal(\n", - " mean=[3, 0], cov=[[1, 0.5],[0.5, 1]], size=num_samples_per_class)" + " mean=[3, 0],\n", + " cov=[[1, 0.5],[0.5, 1]],\n", + " size=num_samples_per_class)" ] }, { @@ -648,7 +652,7 @@ "colab_type": "text" }, "source": [ - "## Anatomy of a neural network: understanding core Keras APIs" + "## Anatomy of a neural network: Understanding core Keras APIs" ] }, { @@ -657,7 +661,7 @@ "colab_type": "text" }, "source": [ - "### Layers: the building blocks of deep learning" + "### Layers: The building blocks of deep learning" ] }, { @@ -666,7 +670,16 @@ "colab_type": "text" }, "source": [ - "#### The base `Layer` class in Keras" + "#### The base Layer class in Keras" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text" + }, + "source": [ + "**A `Dense` layer implemented as a `Layer` subclass**" ] }, { @@ -720,7 +733,7 @@ "colab_type": "text" }, "source": [ - "#### Automatic shape inference: building layers on the fly" + "#### Automatic shape inference: Building layers on the fly" ] }, { @@ -782,7 +795,7 @@ "colab_type": "text" }, "source": [ - "### The \"compile\" step: configuring the learning process" + "### The \"compile\" step: Configuring the learning process" ] }, { @@ -827,7 +840,7 @@ "colab_type": "text" }, "source": [ - "### Understanding the `fit` method" + "### Understanding the fit() method" ] }, { @@ -836,7 +849,7 @@ "colab_type": "text" }, "source": [ - "**Calling `fit` with NumPy data**" + "**Calling `fit()` with NumPy data**" ] }, { @@ -872,7 +885,7 @@ "colab_type": "text" }, "source": [ - "### Monitoring loss & metrics on validation data" + "### Monitoring loss and metrics on validation data" ] }, { @@ -881,7 +894,7 @@ "colab_type": "text" }, "source": [ - "**Using the validation data argument**" + "**Using the `validation_data` argument**" ] }, { @@ -921,7 +934,7 @@ "colab_type": "text" }, "source": [ - "### Inference: using a model after training" + "### Inference: Using a model after training" ] }, { @@ -942,7 +955,7 @@ "colab_type": "text" }, "source": [ - "## Chapter summary" + "## Summary" ] } ], diff --git a/chapter04_getting-started-with-neural-networks.ipynb b/chapter04_getting-started-with-neural-networks.ipynb index bd9ade7362..ba77a17d45 100644 --- a/chapter04_getting-started-with-neural-networks.ipynb +++ b/chapter04_getting-started-with-neural-networks.ipynb @@ -15,7 +15,7 @@ "colab_type": "text" }, "source": [ - "# Getting started with neural networks: classification and regression" + "# Getting started with neural networks: Classification and regression" ] }, { @@ -24,7 +24,7 @@ "colab_type": "text" }, "source": [ - "## Classifying movie reviews: a binary classification example" + "## Classifying movie reviews: A binary classification example" ] }, { @@ -445,7 +445,7 @@ "colab_type": "text" }, "source": [ - "## Classifying newswires: a multiclass classification example" + "## Classifying newswires: A multiclass classification example" ] }, { @@ -994,7 +994,7 @@ "colab_type": "text" }, "source": [ - "## Predicting house prices: a regression example" + "## Predicting house prices: A regression example" ] }, { @@ -1378,7 +1378,7 @@ "colab_type": "text" }, "source": [ - "## Chapter summary" + "## Summary" ] } ], diff --git a/chapter05_fundamentals-of-ml.ipynb b/chapter05_fundamentals-of-ml.ipynb index ccf374db6b..dd61f4ead8 100644 --- a/chapter05_fundamentals-of-ml.ipynb 
+++ b/chapter05_fundamentals-of-ml.ipynb @@ -24,7 +24,7 @@ "colab_type": "text" }, "source": [ - "## Generalization: the goal of machine learning" + "## Generalization: The goal of machine learning" ] }, { @@ -699,9 +699,9 @@ }, "outputs": [], "source": [ - "from\u00a0keras\u00a0import\u00a0regularizers\n", + "from tensorflow.keras import regularizers\n", "regularizers.l1(0.001)\n", - "regularizers.l1_l2(l1=0.001,\u00a0l2=0.001)" + "regularizers.l1_l2(l1=0.001, l2=0.001)" ] }, { @@ -751,7 +751,7 @@ "colab_type": "text" }, "source": [ - "## Chapter summary" + "## Summary" ] } ], diff --git a/chapter07_working-with-keras.ipynb b/chapter07_working-with-keras.ipynb index 9c979b2296..46426e00c0 100644 --- a/chapter07_working-with-keras.ipynb +++ b/chapter07_working-with-keras.ipynb @@ -15,7 +15,7 @@ "colab_type": "text" }, "source": [ - "# Working with Keras: a deep dive" + "# Working with Keras: A deep dive" ] }, { @@ -51,7 +51,7 @@ "colab_type": "text" }, "source": [ - "**The Sequential class**" + "**The `Sequential` class**" ] }, { @@ -227,7 +227,7 @@ "colab_type": "text" }, "source": [ - "**A simple Functional model with two Dense layers**" + "**A simple Functional model with two `Dense` layers**" ] }, { @@ -403,7 +403,7 @@ "priority_data = np.random.random(size=(num_samples, 1))\n", "department_data = np.random.randint(0, 2, size=(num_samples, num_departments))\n", "\n", - "model.compile(optimizer=\"adam\",\n", + "model.compile(optimizer=\"rmsprop\",\n", " loss=[\"mean_squared_error\", \"categorical_crossentropy\"],\n", " metrics=[[\"mean_absolute_error\"], [\"accuracy\"]])\n", "model.fit([title_data, text_body_data, tags_data],\n", @@ -431,7 +431,7 @@ }, "outputs": [], "source": [ - "model.compile(optimizer=\"adam\",\n", + "model.compile(optimizer=\"rmsprop\",\n", " loss={\"priority\": \"mean_squared_error\", \"department\": \"categorical_crossentropy\"},\n", " metrics={\"priority\": [\"mean_absolute_error\"], \"department\": [\"accuracy\"]})\n", "model.fit({\"title\": title_data, \"text_body\": text_body_data, \"tags\": tags_data},\n", @@ -449,16 +449,7 @@ "colab_type": "text" }, "source": [ - "#### The power of the Functional API: access to layer connectivity" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text" - }, - "source": [ - "##### Plotting layer connectivity" + "#### The power of the Functional API: Access to layer connectivity" ] }, { @@ -483,15 +474,6 @@ "keras.utils.plot_model(model, \"ticket_classifier_with_shape_info.png\", show_shapes=True)" ] }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text" - }, - "source": [ - "##### Feature extraction with a Functional model" - ] - }, { "cell_type": "markdown", "metadata": { @@ -576,7 +558,7 @@ "colab_type": "text" }, "source": [ - "### Subclassing the `Model` class" + "### Subclassing the Model class" ] }, { @@ -649,16 +631,21 @@ }, "outputs": [], "source": [ - "model.compile(optimizer=\"adam\",\n", + "model.compile(optimizer=\"rmsprop\",\n", " loss=[\"mean_squared_error\", \"categorical_crossentropy\"],\n", " metrics=[[\"mean_absolute_error\"], [\"accuracy\"]])\n", - "model.fit({\"title\": title_data, \"text_body\": text_body_data, \"tags\": tags_data},\n", + "model.fit({\"title\": title_data,\n", + " \"text_body\": text_body_data,\n", + " \"tags\": tags_data},\n", " [priority_data, department_data],\n", " epochs=1)\n", - "model.evaluate({\"title\": title_data, \"text_body\": text_body_data, \"tags\": tags_data},\n", + "model.evaluate({\"title\": title_data,\n", + " \"text_body\": 
text_body_data,\n", + " \"tags\": tags_data},\n", " [priority_data, department_data])\n", - "priority_preds, department_preds = model.predict(\n", - " {\"title\": title_data, \"text_body\": text_body_data, \"tags\": tags_data})" + "priority_preds, department_preds = model.predict({\"title\": title_data,\n", + " \"text_body\": text_body_data,\n", + " \"tags\": tags_data})" ] }, { @@ -667,7 +654,7 @@ "colab_type": "text" }, "source": [ - "#### Beware: what subclassed models don't support" + "#### Beware: What subclassed models don't support" ] }, { @@ -758,7 +745,7 @@ "colab_type": "text" }, "source": [ - "### Remember: use the right tool for the job" + "### Remember: Use the right tool for the job" ] }, { @@ -776,7 +763,7 @@ "colab_type": "text" }, "source": [ - "**The standard workflow: `compile()` / `fit()` / `evaluate()` / `predict()`**" + "**The standard workflow: `compile()`, `fit()`, `evaluate()`, `predict()`**" ] }, { @@ -889,7 +876,7 @@ "colab_type": "text" }, "source": [ - "### Using Callbacks" + "### Using callbacks" ] }, { @@ -898,7 +885,7 @@ "colab_type": "text" }, "source": [ - "#### The `EarlyStopping` and `ModelCheckpoint` callbacks" + "#### The EarlyStopping and ModelCheckpoint callbacks" ] }, { @@ -920,8 +907,8 @@ "source": [ "callbacks_list = [\n", " keras.callbacks.EarlyStopping(\n", - " monitor=\"accuracy\",\n", - " patience=1,\n", + " monitor=\"val_accuracy\",\n", + " patience=2,\n", " ),\n", " keras.callbacks.ModelCheckpoint(\n", " filepath=\"checkpoint_path.keras\",\n", @@ -1263,7 +1250,7 @@ "colab_type": "text" }, "source": [ - "### Make it fast with `tf.function`" + "### Make it fast with tf.function" ] }, { @@ -1272,7 +1259,7 @@ "colab_type": "text" }, "source": [ - "**Adding a `tf.function` decorator to our evaluation step function**" + "**Adding a `tf.function` decorator to our evaluation-step function**" ] }, { @@ -1313,7 +1300,7 @@ "colab_type": "text" }, "source": [ - "### Leveraging `fit()` with a custom training loop" + "### Leveraging fit() with a custom training loop" ] }, { @@ -1417,7 +1404,7 @@ "colab_type": "text" }, "source": [ - "## Chapter summary" + "## Summary" ] } ], diff --git a/chapter08_intro-to-dl-for-computer-vision.ipynb b/chapter08_intro-to-dl-for-computer-vision.ipynb index aaa2f47005..60072bce8a 100644 --- a/chapter08_intro-to-dl-for-computer-vision.ipynb +++ b/chapter08_intro-to-dl-for-computer-vision.ipynb @@ -170,7 +170,7 @@ "colab_type": "text" }, "source": [ - "**An incorrectly-structured convnet missing its max pooling layers**" + "**An incorrectly structured convnet missing its max-pooling layers**" ] }, { @@ -317,7 +317,7 @@ "colab_type": "text" }, "source": [ - "### Building your model" + "### Building the model" ] }, { @@ -404,7 +404,7 @@ "colab_type": "text" }, "source": [ - "**Using `image_dataset_from_directory` to read images from directories**" + "**Using `image_dataset_from_directory` to read images**" ] }, { @@ -431,15 +431,6 @@ " batch_size=32)" ] }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text" - }, - "source": [ - "**Instantiating a Dataset from a NumPy array**" - ] - }, { "cell_type": "code", "execution_count": 0, @@ -454,15 +445,6 @@ "dataset = tf.data.Dataset.from_tensor_slices(random_numbers)" ] }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text" - }, - "source": [ - "**Iterating on a dataset**" - ] - }, { "cell_type": "code", "execution_count": 0, @@ -477,15 +459,6 @@ " break" ] }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text" - }, - "source": [ 
- "**Batching a dataset**" - ] - }, { "cell_type": "code", "execution_count": 0, @@ -501,15 +474,6 @@ " break" ] }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text" - }, - "source": [ - "**Applying a transformation to Dataset elements using `map()`**" - ] - }, { "cell_type": "code", "execution_count": 0, @@ -531,7 +495,7 @@ "colab_type": "text" }, "source": [ - "**Displaying the shapes of the data and labels yielded by the Dataset**" + "**Displaying the shapes of the data and labels yielded by the `Dataset`**" ] }, { @@ -554,7 +518,7 @@ "colab_type": "text" }, "source": [ - "**Fitting the model using a Dataset**" + "**Fitting the model using a `Dataset`**" ] }, { @@ -984,7 +948,7 @@ "colab_type": "text" }, "source": [ - "**Instantiating and freeze the VGG16 convolutional base**" + "**Instantiating and freezing the VGG16 convolutional base**" ] }, { @@ -1042,7 +1006,7 @@ "colab_type": "text" }, "source": [ - "**Adding data augmentation and a densely-connected classifier to the frozen convolutional base**" + "**Adding a data augmentation stage and a classifier to the convolutional base**" ] }, { @@ -1145,7 +1109,7 @@ "colab_type": "text" }, "source": [ - "**Freezing all layers until the 4th from the last**" + "**Freezing all layers until the fourth from the last**" ] }, { @@ -1214,7 +1178,7 @@ "colab_type": "text" }, "source": [ - "## Chapter summary" + "## Summary" ] } ], diff --git a/chapter09_part02_modern-convnet-architecture-patterns.ipynb b/chapter09_part02_modern-convnet-architecture-patterns.ipynb index b8136b0764..941946b2e2 100644 --- a/chapter09_part02_modern-convnet-architecture-patterns.ipynb +++ b/chapter09_part02_modern-convnet-architecture-patterns.ipynb @@ -42,7 +42,7 @@ "colab_type": "text" }, "source": [ - "**Case where the target block changes the number of output filters**" + "**Residual block where the number of filters changes**" ] }, { @@ -70,7 +70,7 @@ "colab_type": "text" }, "source": [ - "**Case where the target block includes a max pooling layer**" + "**Case where target block includes a max pooling layer**" ] }, { @@ -147,7 +147,7 @@ "colab_type": "text" }, "source": [ - "### Putting it together: a mini Xception-like model" + "### Putting it together: A mini Xception-like model" ] }, { diff --git a/chapter09_part03_interpreting-what-convnets-learn.ipynb b/chapter09_part03_interpreting-what-convnets-learn.ipynb index 9f9c4aba11..0767d47e71 100644 --- a/chapter09_part03_interpreting-what-convnets-learn.ipynb +++ b/chapter09_part03_interpreting-what-convnets-learn.ipynb @@ -297,7 +297,7 @@ "colab_type": "text" }, "source": [ - "**Creating a \"feature extractor\" model that returns the output of a specific layer**" + "**Creating a feature extractor model**" ] }, { @@ -612,7 +612,7 @@ "colab_type": "text" }, "source": [ - "**Setting up a model that goes from the last convolutional output to the final predictions**" + "**Reapplying the classifier on top of the last convolutional output**" ] }, { @@ -636,7 +636,7 @@ "colab_type": "text" }, "source": [ - "**Retrieving the gradients of the top predicted class with regard to the last convolutional output**" + "**Retrieving the gradients of the top predicted class**" ] }, { @@ -665,7 +665,7 @@ "colab_type": "text" }, "source": [ - "**Gradient pooling and channel importance weighting**" + "**Gradient pooling and channel-importance weighting**" ] }, { @@ -711,7 +711,7 @@ "colab_type": "text" }, "source": [ - "**Superimposing the heatmap with the original picture**" + "**Superimposing the heatmap on the 
original picture**" ] }, { @@ -750,7 +750,7 @@ "colab_type": "text" }, "source": [ - "## Chapter summary" + "## Summary" ] } ], diff --git a/chapter10_dl-for-timeseries.ipynb b/chapter10_dl-for-timeseries.ipynb index 24ecd832ac..48735a24cf 100644 --- a/chapter10_dl-for-timeseries.ipynb +++ b/chapter10_dl-for-timeseries.ipynb @@ -33,7 +33,7 @@ "colab_type": "text" }, "source": [ - "## A temperature forecasting example" + "## A temperature-forecasting example" ] }, { @@ -151,7 +151,7 @@ "colab_type": "text" }, "source": [ - "**Computing the number of samples we'll use for each data split.**" + "**Computing the number of samples we'll use for each data split**" ] }, { @@ -231,7 +231,7 @@ "colab_type": "text" }, "source": [ - "**Instantiating Datasets for training, validation, and testing.**" + "**Instantiating datasets for training, validation, and testing**" ] }, { @@ -283,7 +283,7 @@ "colab_type": "text" }, "source": [ - "**Inspecting the output of one of our Datasets.**" + "**Inspecting the output of one of our datasets**" ] }, { @@ -345,7 +345,7 @@ "colab_type": "text" }, "source": [ - "### Let's try a basic machine learning model" + "### Let's try a basic machine-learning model" ] }, { @@ -500,7 +500,7 @@ " callbacks=callbacks)\n", "\n", "model = keras.models.load_model(\"jena_lstm.keras\")\n", - "print(\"Test MAE: {model.evaluate(test_dataset)[1]:.2f}\")" + "print(f\"Test MAE: {model.evaluate(test_dataset)[1]:.2f}\")" ] }, { @@ -561,7 +561,7 @@ "colab_type": "text" }, "source": [ - "**A RNN layer that can process sequences of any length**" + "**An RNN layer that can process sequences of any length**" ] }, { @@ -583,7 +583,7 @@ "colab_type": "text" }, "source": [ - "**A RNN layer that returns only its last output step**" + "**An RNN layer that returns only its last output step**" ] }, { @@ -607,7 +607,7 @@ "colab_type": "text" }, "source": [ - "**A RNN layer that returns its full output sequence**" + "**An RNN layer that returns its full output sequence**" ] }, { @@ -801,7 +801,7 @@ "colab_type": "text" }, "source": [ - "### *_Going even further_*" + "### Going even further" ] }, { @@ -810,7 +810,7 @@ "colab_type": "text" }, "source": [ - "## Chapter summary" + "## Summary" ] } ], diff --git a/chapter11_part01_introduction.ipynb b/chapter11_part01_introduction.ipynb index 1c9708d04f..3ef20b7618 100644 --- a/chapter11_part01_introduction.ipynb +++ b/chapter11_part01_introduction.ipynb @@ -24,7 +24,7 @@ "colab_type": "text" }, "source": [ - "## Natural Language Processing: the bird's eye view" + "## Natural-language processing: The bird's eye view" ] }, { @@ -69,7 +69,7 @@ "colab_type": "text" }, "source": [ - "### Using the `TextVectorization` layer" + "### Using the TextVectorization layer" ] }, { @@ -255,7 +255,7 @@ "colab_type": "text" }, "source": [ - "## Two approaches for representing groups of words: sets and sequences" + "## Two approaches for representing groups of words: Sets and sequences" ] }, { @@ -380,7 +380,7 @@ "colab_type": "text" }, "source": [ - "### Processing words as a set: the bag-of-words approach" + "### Processing words as a set: The bag-of-words approach" ] }, { @@ -411,14 +411,20 @@ "source": [ "text_vectorization = TextVectorization(\n", " max_tokens=20000,\n", - " output_mode=\"binary\",\n", + " output_mode=\"multi_hot\",\n", ")\n", "text_only_train_ds = train_ds.map(lambda x, y: x)\n", "text_vectorization.adapt(text_only_train_ds)\n", "\n", - "binary_1gram_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y))\n", - "binary_1gram_val_ds = 
val_ds.map(lambda x, y: (text_vectorization(x), y))\n",
-    "binary_1gram_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y))"
+    "binary_1gram_train_ds = train_ds.map(\n",
+    "    lambda x, y: (text_vectorization(x), y),\n",
+    "    num_parallel_calls=4)\n",
+    "binary_1gram_val_ds = val_ds.map(\n",
+    "    lambda x, y: (text_vectorization(x), y),\n",
+    "    num_parallel_calls=4)\n",
+    "binary_1gram_test_ds = test_ds.map(\n",
+    "    lambda x, y: (text_vectorization(x), y),\n",
+    "    num_parallel_calls=4)"
 ]
},
{
@@ -540,7 +546,7 @@
 "text_vectorization = TextVectorization(\n",
 "    ngrams=2,\n",
 "    max_tokens=20000,\n",
-    "    output_mode=\"binary\",\n",
+    "    output_mode=\"multi_hot\",\n",
 ")"
 ]
},
{
@@ -562,9 +568,15 @@
 "outputs": [],
 "source": [
 "text_vectorization.adapt(text_only_train_ds)\n",
-    "binary_2gram_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y))\n",
-    "binary_2gram_val_ds = val_ds.map(lambda x, y: (text_vectorization(x), y))\n",
-    "binary_2gram_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y))\n",
+    "binary_2gram_train_ds = train_ds.map(\n",
+    "    lambda x, y: (text_vectorization(x), y),\n",
+    "    num_parallel_calls=4)\n",
+    "binary_2gram_val_ds = val_ds.map(\n",
+    "    lambda x, y: (text_vectorization(x), y),\n",
+    "    num_parallel_calls=4)\n",
+    "binary_2gram_test_ds = test_ds.map(\n",
+    "    lambda x, y: (text_vectorization(x), y),\n",
+    "    num_parallel_calls=4)\n",
 "\n",
 "model = get_model()\n",
 "model.summary()\n",
@@ -619,7 +631,7 @@
 "colab_type": "text"
},
"source": [
-    "**Configuring the `TextVectorization` layer to return TF-IDF-weighted outputs**"
+    "**Configuring `TextVectorization` to return TF-IDF-weighted outputs**"
 ]
},
{
@@ -656,9 +668,15 @@
 "source": [
 "text_vectorization.adapt(text_only_train_ds)\n",
 "\n",
-    "tfidf_2gram_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y))\n",
-    "tfidf_2gram_val_ds = val_ds.map(lambda x, y: (text_vectorization(x), y))\n",
-    "tfidf_2gram_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y))\n",
+    "tfidf_2gram_train_ds = train_ds.map(\n",
+    "    lambda x, y: (text_vectorization(x), y),\n",
+    "    num_parallel_calls=4)\n",
+    "tfidf_2gram_val_ds = val_ds.map(\n",
+    "    lambda x, y: (text_vectorization(x), y),\n",
+    "    num_parallel_calls=4)\n",
+    "tfidf_2gram_test_ds = test_ds.map(\n",
+    "    lambda x, y: (text_vectorization(x), y),\n",
+    "    num_parallel_calls=4)\n",
 "\n",
 "model = get_model()\n",
 "model.summary()\n",
diff --git a/chapter11_part02_sequence-models.ipynb b/chapter11_part02_sequence-models.ipynb
index 7bc8912ec9..ee1fac0fbd 100644
--- a/chapter11_part02_sequence-models.ipynb
+++ b/chapter11_part02_sequence-models.ipynb
@@ -15,7 +15,7 @@
 "colab_type": "text"
},
"source": [
-    "### Processing words as a sequence: the Sequence Model approach"
+    "### Processing words as a sequence: The sequence model approach"
 ]
},
{
@@ -122,9 +122,15 @@
 ")\n",
 "text_vectorization.adapt(text_only_train_ds)\n",
 "\n",
-    "int_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y))\n",
-    "int_val_ds = val_ds.map(lambda x, y: (text_vectorization(x), y))\n",
-    "int_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y))"
+    "int_train_ds = train_ds.map(\n",
+    "    lambda x, y: (text_vectorization(x), y),\n",
+    "    num_parallel_calls=4)\n",
+    "int_val_ds = val_ds.map(\n",
+    "    lambda x, y: (text_vectorization(x), y),\n",
+    "    num_parallel_calls=4)\n",
+    "int_test_ds = test_ds.map(\n",
+    "    lambda x, y: (text_vectorization(x), y),\n",
+    "    num_parallel_calls=4)"
 ]
},
{
 "cell_type": "markdown",
 "metadata": {
 "colab_type": "text"
 },
 "source": [
-    "**A 
sequence model built on top of one-hot encoded vector sequences**" + "**A sequence model built on one-hot encoded vector sequences**" ] }, { @@ -198,7 +204,7 @@ "colab_type": "text" }, "source": [ - "##### Learning word embeddings with the `Embedding` layer" + "#### Learning word embeddings with the Embedding layer" ] }, { @@ -227,7 +233,7 @@ "colab_type": "text" }, "source": [ - "**Model that uses an Embedding layer trained from scratch**" + "**Model that uses an `Embedding` layer trained from scratch**" ] }, { @@ -264,7 +270,7 @@ "colab_type": "text" }, "source": [ - "###### Understanding padding & masking" + "#### Understanding padding and masking" ] }, { @@ -273,7 +279,7 @@ "colab_type": "text" }, "source": [ - "**Model that uses an Embedding layer trained from scratch, with masking enabled**" + "**Using an `Embedding` layer with masking enabled**" ] }, { @@ -311,16 +317,7 @@ "colab_type": "text" }, "source": [ - "##### Using pretrained word embeddings" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text" - }, - "source": [ - "###### Downloading the GloVe word embeddings" + "#### Using pretrained word embeddings" ] }, { @@ -365,15 +362,6 @@ "print(f\"Found {len(embeddings_index)} word vectors.\")" ] }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text" - }, - "source": [ - "###### Loading the GloVe embeddings in the model" - ] - }, { "cell_type": "markdown", "metadata": { @@ -421,15 +409,6 @@ ")" ] }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text" - }, - "source": [ - "###### Training a simple bidirectional LSTM on top of the GloVe embeddings" - ] - }, { "cell_type": "markdown", "metadata": { diff --git a/chapter11_part03_transformer.ipynb b/chapter11_part03_transformer.ipynb index 397e92e90a..0cab099487 100644 --- a/chapter11_part03_transformer.ipynb +++ b/chapter11_part03_transformer.ipynb @@ -42,7 +42,7 @@ "colab_type": "text" }, "source": [ - "### Multi-Head attention" + "### Multi-head attention" ] }, { @@ -149,9 +149,15 @@ ")\n", "text_vectorization.adapt(text_only_train_ds)\n", "\n", - "int_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y))\n", - "int_val_ds = val_ds.map(lambda x, y: (text_vectorization(x), y))\n", - "int_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y))" + "int_train_ds = train_ds.map(\n", + " lambda x, y: (text_vectorization(x), y),\n", + " num_parallel_calls=4)\n", + "int_val_ds = val_ds.map(\n", + " lambda x, y: (text_vectorization(x), y),\n", + " num_parallel_calls=4)\n", + "int_test_ds = test_ds.map(\n", + " lambda x, y: (text_vectorization(x), y),\n", + " num_parallel_calls=4)" ] }, { @@ -160,7 +166,7 @@ "colab_type": "text" }, "source": [ - "**Transformer encoder implemented as a subclassed Layer**" + "**Transformer encoder implemented as a subclassed `Layer`**" ] }, { @@ -215,7 +221,7 @@ "colab_type": "text" }, "source": [ - "**Text classification model that combines the Transformer encoder and a pooling layer**" + "**Using the Transformer encoder for text classification**" ] }, { @@ -278,7 +284,7 @@ "colab_type": "text" }, "source": [ - "#### Using positional encoding to reinject order information" + "#### Using positional encoding to re-inject order information" ] }, { @@ -335,7 +341,7 @@ "colab_type": "text" }, "source": [ - "#### Putting it all together: a text-classification Transformer" + "#### Putting it all together: A text-classification Transformer" ] }, { @@ -344,7 +350,7 @@ "colab_type": "text" }, "source": [ - "**Text classification model that 
combines positional embedding, the Transformer encoder, and a pooling layer**" + "**Combining the Transformer encoder with positional embedding**" ] }, { diff --git a/chapter11_part04_sequence-to-sequence-learning.ipynb b/chapter11_part04_sequence-to-sequence-learning.ipynb index 388e64dc25..a08929dedb 100644 --- a/chapter11_part04_sequence-to-sequence-learning.ipynb +++ b/chapter11_part04_sequence-to-sequence-learning.ipynb @@ -15,7 +15,7 @@ "colab_type": "text" }, "source": [ - "## Beyond text classification: sequence-to-sequence learning" + "## Beyond text classification: Sequence-to-sequence learning" ] }, { @@ -142,7 +142,7 @@ "colab_type": "text" }, "source": [ - "**Preparing training and validation datasets for the translation task**" + "**Preparing datasets for the translation task**" ] }, { @@ -169,7 +169,7 @@ " spa_texts = list(spa_texts)\n", " dataset = tf.data.Dataset.from_tensor_slices((eng_texts, spa_texts))\n", " dataset = dataset.batch(batch_size)\n", - " dataset = dataset.map(format_dataset)\n", + " dataset = dataset.map(format_dataset, num_parallel_calls=4)\n", " return dataset.shuffle(2048).prefetch(16).cache()\n", "\n", "train_ds = make_dataset(train_pairs)\n", @@ -346,7 +346,7 @@ "colab_type": "text" }, "source": [ - "**The TransformerDecoder**" + "**The `TransformerDecoder`**" ] }, { @@ -427,7 +427,7 @@ "colab_type": "text" }, "source": [ - "#### Putting it all together: a Transformer for machine translation" + "#### Putting it all together: A Transformer for machine translation" ] }, { @@ -586,7 +586,7 @@ "colab_type": "text" }, "source": [ - "## Chapter summary" + "## Summary" ] } ], diff --git a/chapter12_part01_text-generation.ipynb b/chapter12_part01_text-generation.ipynb index 6fa2f175f7..1c43438d3b 100644 --- a/chapter12_part01_text-generation.ipynb +++ b/chapter12_part01_text-generation.ipynb @@ -123,7 +123,7 @@ "colab_type": "text" }, "source": [ - "**Creating a Dataset that yields the content of a set of text files (one file = one sample)**" + "**Creating a dataset from text files (one file = one sample)**" ] }, { @@ -147,7 +147,7 @@ "colab_type": "text" }, "source": [ - "**Preparing a TextVectorization layer**" + "**Preparing a `TextVectorization` layer**" ] }, { @@ -193,8 +193,7 @@ " y = vectorized_sequences[:, 1:]\n", " return x, y\n", "\n", - "lm_dataset = dataset.map(prepare_lm_dataset)\n", - "lm_dataset = lm_dataset.prefetch(8)" + "lm_dataset = dataset.map(prepare_lm_dataset, num_parallel_calls=4)" ] }, { @@ -357,7 +356,7 @@ "colab_type": "text" }, "source": [ - "**The text generation callback**" + "**The text-generation callback**" ] }, { diff --git a/chapter12_part02_deep-dream.ipynb b/chapter12_part02_deep-dream.ipynb index 4827f1587e..7e01d0fbee 100644 --- a/chapter12_part02_deep-dream.ipynb +++ b/chapter12_part02_deep-dream.ipynb @@ -27,6 +27,15 @@ "### Implementing DeepDream in Keras" ] }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text" + }, + "source": [ + "**Fetching the test image**" + ] + }, { "cell_type": "code", "execution_count": 0, @@ -45,6 +54,15 @@ "plt.imshow(keras.utils.load_img(base_image_path))" ] }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text" + }, + "source": [ + "**Instantiating a pretrained `InceptionV3` model**" + ] + }, { "cell_type": "code", "execution_count": 0, @@ -57,6 +75,15 @@ "model = inception_v3.InceptionV3(weights=\"imagenet\", include_top=False)" ] }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text" + }, + "source": [ + "**Configuring the 
contribution of each layer to the DeepDream loss**" + ] + }, { "cell_type": "code", "execution_count": 0, @@ -80,6 +107,15 @@ "feature_extractor = keras.Model(inputs=model.inputs, outputs=outputs_dict)" ] }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text" + }, + "source": [ + "**The DeepDream loss**" + ] + }, { "cell_type": "code", "execution_count": 0, @@ -98,6 +134,15 @@ " return loss" ] }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text" + }, + "source": [ + "**The DeepDream gradient ascent process**" + ] + }, { "cell_type": "code", "execution_count": 0, @@ -143,6 +188,15 @@ "max_loss = 15." ] }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text" + }, + "source": [ + "**Image processing utilities**" + ] + }, { "cell_type": "code", "execution_count": 0, @@ -169,6 +223,15 @@ " return img" ] }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text" + }, + "source": [ + "**Running gradient ascent over multiple successive \"octaves\"**" + ] + }, { "cell_type": "code", "execution_count": 0, diff --git a/chapter12_part03_neural-style-transfer.ipynb b/chapter12_part03_neural-style-transfer.ipynb index d5ebcb0660..42fd13ef84 100644 --- a/chapter12_part03_neural-style-transfer.ipynb +++ b/chapter12_part03_neural-style-transfer.ipynb @@ -117,7 +117,7 @@ "colab_type": "text" }, "source": [ - "**Loading the pretrained VGG19 network and using it to define a feature extractor**" + "**Using a pretrained VGG19 model to create a feature extractor**" ] }, { diff --git a/chapter12_part04_variational-autoencoders.ipynb b/chapter12_part04_variational-autoencoders.ipynb index 9f44f4bde9..fd6ae9c13a 100644 --- a/chapter12_part04_variational-autoencoders.ipynb +++ b/chapter12_part04_variational-autoencoders.ipynb @@ -254,7 +254,7 @@ "colab_type": "text" }, "source": [ - "**Sampling a grid of points from the 2D latent space and decoding them to images**" + "**Sampling a grid of images from the 2D latent space**" ] }, { diff --git a/chapter12_part05_gans.ipynb b/chapter12_part05_gans.ipynb index 1e5574bf64..993542fc5b 100644 --- a/chapter12_part05_gans.ipynb +++ b/chapter12_part05_gans.ipynb @@ -73,7 +73,7 @@ "colab_type": "text" }, "source": [ - "**Creating a Dataset from a directory of images**" + "**Creating a dataset from a directory of images**" ] }, { @@ -264,7 +264,7 @@ "colab_type": "text" }, "source": [ - "**The GAN Model**" + "**The GAN `Model`**" ] }, { @@ -297,11 +297,13 @@ "\n", " def train_step(self, real_images):\n", " batch_size = tf.shape(real_images)[0]\n", - " random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))\n", + " random_latent_vectors = tf.random.normal(\n", + " shape=(batch_size, self.latent_dim))\n", " generated_images = self.generator(random_latent_vectors)\n", " combined_images = tf.concat([generated_images, real_images], axis=0)\n", " labels = tf.concat(\n", - " [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0\n", + " [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))],\n", + " axis=0\n", " )\n", " labels += 0.05 * tf.random.uniform(tf.shape(labels))\n", "\n", @@ -314,19 +316,22 @@ " )\n", "\n", " random_latent_vectors = tf.random.normal(\n", - " shape=(batch_size, self.latent_dim))\n", + " shape=(batch_size, self.latent_dim))\n", "\n", " misleading_labels = tf.zeros((batch_size, 1))\n", "\n", " with tf.GradientTape() as tape:\n", - " predictions = self.discriminator(self.generator(random_latent_vectors))\n", + " predictions = self.discriminator(\n", + " 
self.generator(random_latent_vectors))\n", " g_loss = self.loss_fn(misleading_labels, predictions)\n", " grads = tape.gradient(g_loss, self.generator.trainable_weights)\n", - " self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))\n", + " self.g_optimizer.apply_gradients(\n", + " zip(grads, self.generator.trainable_weights))\n", "\n", " self.d_loss_metric.update_state(d_loss)\n", " self.g_loss_metric.update_state(g_loss)\n", - " return {\"d_loss\": self.d_loss_metric.result(), \"g_loss\": self.g_loss_metric.result()}" + " return {\"d_loss\": self.d_loss_metric.result(),\n", + " \"g_loss\": self.g_loss_metric.result()}" ] }, { @@ -335,7 +340,7 @@ "colab_type": "text" }, "source": [ - "**A callback to sample generated images during training**" + "**A callback that samples generated images during training**" ] }, { @@ -407,7 +412,7 @@ "colab_type": "text" }, "source": [ - "## Chapter summary" + "## Summary" ] } ], diff --git a/chapter13_best-practices-for-the-real-world.ipynb b/chapter13_best-practices-for-the-real-world.ipynb index 8291a2e4bc..68736349e6 100644 --- a/chapter13_best-practices-for-the-real-world.ipynb +++ b/chapter13_best-practices-for-the-real-world.ipynb @@ -24,7 +24,7 @@ "colab_type": "text" }, "source": [ - "## Getting the most our of your models" + "## Getting the most out of your models" ] }, { @@ -96,7 +96,7 @@ "colab_type": "text" }, "source": [ - "**A KerasTuner HyperModel**" + "**A KerasTuner `HyperModel`**" ] }, { @@ -300,7 +300,7 @@ "colab_type": "text" }, "source": [ - "## Scaling up model training" + "## Scaling-up model training" ] }, { @@ -309,7 +309,7 @@ "colab_type": "text" }, "source": [ - "### Speeding up training on GPU with Mixed Precision" + "### Speeding up training on GPU with mixed precision" ] }, { @@ -430,7 +430,7 @@ "colab_type": "text" }, "source": [ - "## Chapter summary" + "## Summary" ] } ], diff --git a/chapter14_conclusions.ipynb b/chapter14_conclusions.ipynb index d92e0b5c52..e8ce1e0b57 100644 --- a/chapter14_conclusions.ipynb +++ b/chapter14_conclusions.ipynb @@ -87,7 +87,7 @@ "colab_type": "text" }, "source": [ - "#### Densely-connected networks" + "#### Densely connected networks" ] }, { @@ -344,7 +344,7 @@ "colab_type": "text" }, "source": [ - "## Setting the course towards greater generality in AI" + "## Setting the course toward greater generality in AI" ] }, { @@ -353,7 +353,7 @@ "colab_type": "text" }, "source": [ - "### On the importance of setting the right objective: the shortcut rule" + "### On the importance of setting the right objective: The shortcut rule" ] }, { @@ -371,7 +371,7 @@ "colab_type": "text" }, "source": [ - "## Implementing intelligence: the missing ingredients" + "## Implementing intelligence: The missing ingredients" ] }, { @@ -461,7 +461,7 @@ "colab_type": "text" }, "source": [ - "#### Integrating deep learning modules and algorithmic modules into hybrid systems" + "#### Integrating deep-learning modules and algorithmic modules into hybrid systems" ] }, {