Migrate to Keras 3 (keras-team#1690)
sachinprasadhs authored and SuryanarayanaY committed Jan 19, 2024
1 parent 1617fd1 commit fef7d5b
Showing 4 changed files with 52 additions and 33 deletions.
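The three text files below apply the same Keras 3 pattern: the backend is pinned through the `KERAS_BACKEND` environment variable before `keras` is imported, and `tf.keras.*` calls become plain `keras.*` calls. A minimal, illustrative sketch of that import pattern (the backend check at the end is an extra sanity check, not part of the diff):

```python
import os

# Keras 3 reads KERAS_BACKEND at import time, so it must be set
# before `import keras`.
os.environ["KERAS_BACKEND"] = "tensorflow"

import keras

# Illustrative sanity check: confirm which backend is active.
print(keras.backend.backend())  # "tensorflow"
```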
21 changes: 12 additions & 9 deletions examples/keras_recipes/creating_tfrecords.py
@@ -2,7 +2,7 @@
Title: Creating TFRecords
Author: [Dimitre Oliveira](https://www.linkedin.com/in/dimitre-oliveira-7a1a0113a/)
Date created: 2021/02/27
-Last modified: 2021/02/27
+Last modified: 2023/12/20
Description: Converting data to the TFRecord format.
Accelerator: GPU
"""
@@ -41,6 +41,9 @@
"""

import os
+
+os.environ["KERAS_BACKEND"] = "tensorflow"
+import keras
import json
import pprint
import tensorflow as tf
@@ -83,7 +86,7 @@

# Download image files
if not os.path.exists(images_dir):
-    image_zip = tf.keras.utils.get_file(
+    image_zip = keras.utils.get_file(
        "images.zip",
        cache_dir=os.path.abspath("."),
        origin=images_url,
@@ -93,7 +96,7 @@

# Download caption annotation files
if not os.path.exists(annotations_dir):
-    annotation_zip = tf.keras.utils.get_file(
+    annotation_zip = keras.utils.get_file(
        "captions.zip",
        cache_dir=os.path.abspath("."),
        origin=annotations_url,
@@ -243,7 +246,7 @@ def parse_tfrecord_fn(example):


def prepare_sample(features):
-    image = tf.image.resize(features["image"], size=(224, 224))
+    image = keras.ops.image.resize(features["image"], size=(224, 224))
    return image, features["category_id"]


@@ -265,16 +268,16 @@ def get_dataset(filenames, batch_size):
steps_per_epoch = 50
AUTOTUNE = tf.data.AUTOTUNE

-input_tensor = tf.keras.layers.Input(shape=(224, 224, 3), name="image")
-model = tf.keras.applications.EfficientNetB0(
+input_tensor = keras.layers.Input(shape=(224, 224, 3), name="image")
+model = keras.applications.EfficientNetB0(
    input_tensor=input_tensor, weights=None, classes=91
)


model.compile(
-    optimizer=tf.keras.optimizers.Adam(),
-    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
-    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
+    optimizer=keras.optimizers.Adam(),
+    loss=keras.losses.SparseCategoricalCrossentropy(),
+    metrics=[keras.metrics.SparseCategoricalAccuracy()],
)


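A note on the `prepare_sample` change above: `keras.ops.image.resize` dispatches to whichever backend is active, and with the TensorFlow backend it runs as regular TF ops, so it can be traced inside a `tf.data` pipeline just as `tf.image.resize` was. A minimal sketch with dummy data (the shapes and dataset here are illustrative, not taken from the example):

```python
import os

os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import tensorflow as tf

# Dummy images standing in for the decoded TFRecord features.
images = tf.random.uniform((8, 640, 529, 3))
dataset = tf.data.Dataset.from_tensor_slices(images)

# With the TensorFlow backend, keras.ops lowers to TF ops, so the resize
# can be mapped over a tf.data.Dataset like tf.image.resize before it.
dataset = dataset.map(lambda img: keras.ops.image.resize(img, size=(224, 224)))

for img in dataset.take(1):
    print(img.shape)  # (224, 224, 3)
```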
26 changes: 16 additions & 10 deletions examples/keras_recipes/ipynb/creating_tfrecords.ipynb
@@ -10,7 +10,7 @@
"\n",
"**Author:** [Dimitre Oliveira](https://www.linkedin.com/in/dimitre-oliveira-7a1a0113a/)<br>\n",
"**Date created:** 2021/02/27<br>\n",
"**Last modified:** 2021/02/27<br>\n",
"**Last modified:** 2023/12/20<br>\n",
"**Description:** Converting data to the TFRecord format."
]
},
@@ -61,6 +61,9 @@
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"KERAS_BACKEND\"] = \"tensorflow\"\n",
"import keras\n",
"import json\n",
"import pprint\n",
"import tensorflow as tf\n",
@@ -117,14 +120,17 @@
"\n",
"# Download image files\n",
"if not os.path.exists(images_dir):\n",
" image_zip = tf.keras.utils.get_file(\n",
" \"images.zip\", cache_dir=os.path.abspath(\".\"), origin=images_url, extract=True,\n",
" image_zip = keras.utils.get_file(\n",
" \"images.zip\",\n",
" cache_dir=os.path.abspath(\".\"),\n",
" origin=images_url,\n",
" extract=True,\n",
" )\n",
" os.remove(image_zip)\n",
"\n",
"# Download caption annotation files\n",
"if not os.path.exists(annotations_dir):\n",
" annotation_zip = tf.keras.utils.get_file(\n",
" annotation_zip = keras.utils.get_file(\n",
" \"captions.zip\",\n",
" cache_dir=os.path.abspath(\".\"),\n",
" origin=annotations_url,\n",
@@ -361,7 +367,7 @@
"source": [
"\n",
"def prepare_sample(features):\n",
" image = tf.image.resize(features[\"image\"], size=(224, 224))\n",
" image = keras.ops.image.resize(features[\"image\"], size=(224, 224))\n",
" return image, features[\"category_id\"]\n",
"\n",
"\n",
@@ -383,16 +389,16 @@
"steps_per_epoch = 50\n",
"AUTOTUNE = tf.data.AUTOTUNE\n",
"\n",
"input_tensor = tf.keras.layers.Input(shape=(224, 224, 3), name=\"image\")\n",
"model = tf.keras.applications.EfficientNetB0(\n",
"input_tensor = keras.layers.Input(shape=(224, 224, 3), name=\"image\")\n",
"model = keras.applications.EfficientNetB0(\n",
" input_tensor=input_tensor, weights=None, classes=91\n",
")\n",
"\n",
"\n",
"model.compile(\n",
" optimizer=tf.keras.optimizers.Adam(),\n",
" loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n",
" metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],\n",
" optimizer=keras.optimizers.Adam(),\n",
" loss=keras.losses.SparseCategoricalCrossentropy(),\n",
" metrics=[keras.metrics.SparseCategoricalAccuracy()],\n",
")\n",
"\n",
"\n",
38 changes: 24 additions & 14 deletions examples/keras_recipes/md/creating_tfrecords.md
@@ -2,7 +2,7 @@

**Author:** [Dimitre Oliveira](https://www.linkedin.com/in/dimitre-oliveira-7a1a0113a/)<br>
**Date created:** 2021/02/27<br>
-**Last modified:** 2021/02/27<br>
+**Last modified:** 2023/12/20<br>
**Description:** Converting data to the TFRecord format.


@@ -46,6 +46,9 @@ numeric) into TFRecord.

```python
import os
+
+os.environ["KERAS_BACKEND"] = "tensorflow"
+import keras
import json
import pprint
import tensorflow as tf
@@ -90,14 +93,17 @@ annotations_url = (

# Download image files
if not os.path.exists(images_dir):
-    image_zip = tf.keras.utils.get_file(
-        "images.zip", cache_dir=os.path.abspath("."), origin=images_url, extract=True,
+    image_zip = keras.utils.get_file(
+        "images.zip",
+        cache_dir=os.path.abspath("."),
+        origin=images_url,
+        extract=True,
    )
    os.remove(image_zip)

# Download caption annotation files
if not os.path.exists(annotations_dir):
-    annotation_zip = tf.keras.utils.get_file(
+    annotation_zip = keras.utils.get_file(
        "captions.zip",
        cache_dir=os.path.abspath("."),
        origin=annotations_url,
@@ -116,9 +122,11 @@ print(f"Number of images: {len(annotations)}")
<div class="k-default-codeblock">
```
Downloading data from http://images.cocodataset.org/zips/val2017.zip
-815587328/815585330 [==============================] - 990s 1us/step
+815585330/815585330 ━━━━━━━━━━━━━━━━━━━━ 79s 0us/step
Downloading data from http://images.cocodataset.org/annotations/annotations_trainval2017.zip
-172441600/252907541 [===================>..........] - ETA: 1:35
+252907541/252907541 ━━━━━━━━━━━━━━━━━━━━ 5s 0us/step
The COCO dataset has been downloaded and extracted successfully.
Number of images: 36781
```
</div>
@@ -282,7 +290,9 @@ Image shape: (640, 529, 3)
```
</div>

![png](/img/examples/keras_recipes/creating_tfrecords/creating_tfrecords_14_1.png)



---
@@ -298,7 +308,7 @@ use only a few of them, in this case, we are going to use only `image` and `cate
```python

def prepare_sample(features):
-    image = tf.image.resize(features["image"], size=(224, 224))
+    image = keras.ops.image.resize(features["image"], size=(224, 224))
    return image, features["category_id"]


@@ -320,16 +330,16 @@ epochs = 1
steps_per_epoch = 50
AUTOTUNE = tf.data.AUTOTUNE

-input_tensor = tf.keras.layers.Input(shape=(224, 224, 3), name="image")
-model = tf.keras.applications.EfficientNetB0(
+input_tensor = keras.layers.Input(shape=(224, 224, 3), name="image")
+model = keras.applications.EfficientNetB0(
    input_tensor=input_tensor, weights=None, classes=91
)


model.compile(
-    optimizer=tf.keras.optimizers.Adam(),
-    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
-    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
+    optimizer=keras.optimizers.Adam(),
+    loss=keras.losses.SparseCategoricalCrossentropy(),
+    metrics=[keras.metrics.SparseCategoricalAccuracy()],
)


@@ -343,9 +353,9 @@ model.fit(

<div class="k-default-codeblock">
```
-50/50 [==============================] - 258s 5s/step - loss: 3.9857 - sparse_categorical_accuracy: 0.2375
+50/50 ━━━━━━━━━━━━━━━━━━━━ 146s 2s/step - loss: 3.9206 - sparse_categorical_accuracy: 0.1690
-<tensorflow.python.keras.callbacks.History at 0x7f6ca4160d90>
+<keras.src.callbacks.history.History at 0x7f70684c27a0>
```
</div>
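The updated log above shows `model.fit` now returning a Keras 3 `History` object (`keras.src.callbacks.history.History`). As a small, illustrative sketch (not part of the diff): if the example's `fit` call were assigned to a variable, the logged metrics could be read back from `History.history`. The names below are reused from the example and assumed to already be in scope.

```python
# Assumes the example's model, get_dataset, train_filenames, batch_size,
# epochs and steps_per_epoch are already defined, as in the script above.
history = model.fit(
    x=get_dataset(train_filenames, batch_size),
    epochs=epochs,
    steps_per_epoch=steps_per_epoch,
)

# History.history maps each tracked metric name to a per-epoch list.
print(history.history["loss"])
print(history.history["sparse_categorical_accuracy"])
```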