Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
431 changes: 200 additions & 231 deletions chapter02_mathematical-building-blocks.ipynb

Large diffs are not rendered by default.

1,754 changes: 1,754 additions & 0 deletions chapter03_introduction-to-ml-frameworks.ipynb

Large diffs are not rendered by default.

1,280 changes: 1,280 additions & 0 deletions chapter04_classification-and-regression.ipynb

Large diffs are not rendered by default.

676 changes: 425 additions & 251 deletions chapter05_fundamentals-of-ml.ipynb

Large diffs are not rendered by default.

1,883 changes: 1,883 additions & 0 deletions chapter07_deep-dive-keras.ipynb

Large diffs are not rendered by default.

1,005 changes: 1,005 additions & 0 deletions chapter08_image-classification.ipynb

Large diffs are not rendered by default.

356 changes: 356 additions & 0 deletions chapter09_convnet-architecture-patterns.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,356 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"This is a companion notebook for the book [Deep Learning with Python, Third Edition](TODO). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.\n\n**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"# %pip (rather than !pip) guarantees the packages are installed into the\n",
"# environment of the running kernel, not whichever `pip` is first on PATH.\n",
"%pip install keras keras-hub --upgrade -q"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"import os\n",
"# Select the JAX backend for Keras 3. The variable must be set before\n",
"# keras is first imported, which is why this cell precedes all keras imports.\n",
"os.environ[\"KERAS_BACKEND\"] = \"jax\""
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Modularity, hierarchy, and reuse"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Residual connections"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"import keras\n",
"from keras import layers\n",
"\n",
"# Minimal residual connection: keep a reference to the layer input,\n",
"# transform it, then add the (projected) original back in.\n",
"inputs = keras.Input(shape=(32, 32, 3))\n",
"x = layers.Conv2D(32, 3, activation=\"relu\")(inputs)\n",
"residual = x\n",
"# padding=\"same\" preserves the spatial dimensions so only the channel\n",
"# counts differ between x (64) and residual (32) after this conv.\n",
"x = layers.Conv2D(64, 3, activation=\"relu\", padding=\"same\")(x)\n",
"# 1x1 conv projects the residual to 64 channels so shapes match for the add.\n",
"residual = layers.Conv2D(64, 1)(residual)\n",
"x = layers.add([x, residual])"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"# Residual connection around a block that downsamples with max pooling.\n",
"inputs = keras.Input(shape=(32, 32, 3))\n",
"x = layers.Conv2D(32, 3, activation=\"relu\")(inputs)\n",
"residual = x\n",
"x = layers.Conv2D(64, 3, activation=\"relu\", padding=\"same\")(x)\n",
"x = layers.MaxPooling2D(2, padding=\"same\")(x)\n",
"# strides=2 downsamples the residual to match the pooled feature map,\n",
"# while the 1x1 kernel matches the channel count (64) for the add.\n",
"residual = layers.Conv2D(64, 1, strides=2)(residual)\n",
"x = layers.add([x, residual])"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"inputs = keras.Input(shape=(32, 32, 3))\n",
"x = layers.Rescaling(1.0 / 255)(inputs)\n",
"\n",
"def residual_block(x, filters, pooling=False):\n",
"    # Reusable block: two same-padded convs wrapped in a residual connection.\n",
"    # With pooling=True the feature map is halved, so the residual is\n",
"    # downsampled with a strided 1x1 conv to keep shapes compatible.\n",
"    residual = x\n",
"    x = layers.Conv2D(filters, 3, activation=\"relu\", padding=\"same\")(x)\n",
"    x = layers.Conv2D(filters, 3, activation=\"relu\", padding=\"same\")(x)\n",
"    if pooling:\n",
"        x = layers.MaxPooling2D(2, padding=\"same\")(x)\n",
"        residual = layers.Conv2D(filters, 1, strides=2)(residual)\n",
"    elif filters != residual.shape[-1]:\n",
"        # Channel counts differ: project the residual with a 1x1 conv.\n",
"        residual = layers.Conv2D(filters, 1)(residual)\n",
"    x = layers.add([x, residual])\n",
"    return x\n",
"\n",
"x = residual_block(x, filters=32, pooling=True)\n",
"x = residual_block(x, filters=64, pooling=True)\n",
"x = residual_block(x, filters=128, pooling=False)\n",
"\n",
"x = layers.GlobalAveragePooling2D()(x)\n",
"# Single sigmoid unit: this model is set up for binary classification.\n",
"outputs = layers.Dense(1, activation=\"sigmoid\")(x)\n",
"model = keras.Model(inputs=inputs, outputs=outputs)"
]
},
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Batch normalization"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Depthwise separable convolutions"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Putting it together: a mini Xception-like model"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"# Authenticate with Kaggle before downloading the competition data below.\n",
"import kagglehub\n",
"\n",
"kagglehub.login()"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"import zipfile\n",
"\n",
"# Fetch the Dogs vs. Cats competition archive via kagglehub,\n",
"# then unpack the training images into the current working directory.\n",
"download_path = kagglehub.competition_download(\"dogs-vs-cats\")\n",
"\n",
"with zipfile.ZipFile(download_path + \"/train.zip\", \"r\") as zip_ref:\n",
"    zip_ref.extractall(\".\")"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"import os, shutil, pathlib\n",
"from keras.utils import image_dataset_from_directory\n",
"\n",
"original_dir = pathlib.Path(\"train\")\n",
"new_base_dir = pathlib.Path(\"dogs_vs_cats_small\")\n",
"\n",
"def make_subset(subset_name, start_index, end_index):\n",
"    # Copy a slice of the cat/dog images into a per-subset directory tree\n",
"    # (dogs_vs_cats_small/<subset>/<category>/).\n",
"    for category in (\"cat\", \"dog\"):\n",
"        # `subset_dir` avoids shadowing the built-in `dir`; exist_ok=True\n",
"        # keeps the cell idempotent so the notebook survives a full re-run.\n",
"        subset_dir = new_base_dir / subset_name / category\n",
"        os.makedirs(subset_dir, exist_ok=True)\n",
"        fnames = [f\"{category}.{i}.jpg\" for i in range(start_index, end_index)]\n",
"        for fname in fnames:\n",
"            shutil.copyfile(src=original_dir / fname, dst=subset_dir / fname)\n",
"\n",
"make_subset(\"train\", start_index=0, end_index=1000)\n",
"make_subset(\"validation\", start_index=1000, end_index=1500)\n",
"make_subset(\"test\", start_index=1500, end_index=2500)\n",
"\n",
"batch_size = 64\n",
"image_size = (180, 180)\n",
"train_dataset = image_dataset_from_directory(\n",
"    new_base_dir / \"train\",\n",
"    image_size=image_size,\n",
"    batch_size=batch_size,\n",
")\n",
"validation_dataset = image_dataset_from_directory(\n",
"    new_base_dir / \"validation\",\n",
"    image_size=image_size,\n",
"    batch_size=batch_size,\n",
")\n",
"test_dataset = image_dataset_from_directory(\n",
"    new_base_dir / \"test\",\n",
"    image_size=image_size,\n",
"    batch_size=batch_size,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"from keras import layers\n",
"\n",
"# Random flips, rotations, and zooms applied on the fly to training images.\n",
"data_augmentation_layers = [\n",
"    layers.RandomFlip(\"horizontal\"),\n",
"    layers.RandomRotation(0.1),\n",
"    layers.RandomZoom(0.2),\n",
"]\n",
"\n",
"def data_augmentation(images, targets):\n",
"    # Chain the augmentation layers over a batch; targets pass through.\n",
"    for layer in data_augmentation_layers:\n",
"        images = layer(images)\n",
"    return images, targets\n",
"\n",
"# Run augmentation inside the tf.data pipeline (8 parallel calls) and\n",
"# prefetch so preprocessing overlaps with training.\n",
"augmented_train_dataset = train_dataset.map(\n",
"    data_augmentation, num_parallel_calls=8\n",
")\n",
"augmented_train_dataset = augmented_train_dataset.prefetch(tf.data.AUTOTUNE)"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"import keras\n",
"\n",
"inputs = keras.Input(shape=(180, 180, 3))\n",
"x = layers.Rescaling(1.0 / 255)(inputs)\n",
"# use_bias=False throughout: each conv is followed by BatchNormalization,\n",
"# whose learned offset makes a separate conv bias redundant.\n",
"x = layers.Conv2D(filters=32, kernel_size=5, use_bias=False)(x)\n",
"\n",
"# Stack of Xception-style blocks with growing width: BN -> ReLU ->\n",
"# separable conv (twice), strided max pooling, and a strided 1x1 conv\n",
"# projection on the residual path to match shape for the add.\n",
"for size in [32, 64, 128, 256, 512]:\n",
"    residual = x\n",
"\n",
"    x = layers.BatchNormalization()(x)\n",
"    x = layers.Activation(\"relu\")(x)\n",
"    x = layers.SeparableConv2D(size, 3, padding=\"same\", use_bias=False)(x)\n",
"\n",
"    x = layers.BatchNormalization()(x)\n",
"    x = layers.Activation(\"relu\")(x)\n",
"    x = layers.SeparableConv2D(size, 3, padding=\"same\", use_bias=False)(x)\n",
"\n",
"    x = layers.MaxPooling2D(3, strides=2, padding=\"same\")(x)\n",
"\n",
"    residual = layers.Conv2D(\n",
"        size, 1, strides=2, padding=\"same\", use_bias=False\n",
"    )(residual)\n",
"    x = layers.add([x, residual])\n",
"\n",
"x = layers.GlobalAveragePooling2D()(x)\n",
"# Dropout before the classifier for regularization.\n",
"x = layers.Dropout(0.5)(x)\n",
"outputs = layers.Dense(1, activation=\"sigmoid\")(x)\n",
"model = keras.Model(inputs=inputs, outputs=outputs)"
]
},
{
"cell_type": "code",
"execution_count": 0,
"metadata": {
"colab_type": "code"
},
"outputs": [],
"source": [
"# Binary crossentropy matches the model's single sigmoid output unit.\n",
"model.compile(\n",
"    loss=\"binary_crossentropy\",\n",
"    optimizer=\"adam\",\n",
"    metrics=[\"accuracy\"],\n",
")\n",
"history = model.fit(\n",
"    augmented_train_dataset,\n",
"    epochs=100,\n",
"    validation_data=validation_dataset,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Beyond convolution: Vision Transformers"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"### Chapter summary"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"collapsed_sections": [],
"name": "chapter09_convnet-architecture-patterns",
"private_outputs": false,
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.0"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
Loading