Enforce line length of 80 chars #1402

Merged: 4 commits, Feb 17, 2023
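This PR mechanically re-wraps code so that no line exceeds 80 characters: long signatures and calls are split one argument per line, and long conditions are parenthesized across lines. The diff does not show the enforcement tooling itself; the repository presumably applies the limit through its formatter and linter configuration (an assumption, not confirmed by this capture). As a rough illustration of the rule being enforced, a minimal hypothetical checker could look like this sketch:

    # check_line_length.py -- hypothetical sketch of the 80-char rule.
    # Illustration only; the repository's real enforcement presumably
    # runs through its formatter/linter config, which this diff omits.
    import sys

    MAX_LEN = 80


    def over_long_lines(path):
        """Yield (line_number, text) for lines longer than MAX_LEN."""
        with open(path) as f:
            for lineno, line in enumerate(f, start=1):
                text = line.rstrip("\n")
                if len(text) > MAX_LEN:
                    yield lineno, text


    if __name__ == "__main__":
        failed = False
        for path in sys.argv[1:]:
            for lineno, text in over_long_lines(path):
                print(f"{path}:{lineno}: {len(text)} chars (limit {MAX_LEN})")
                failed = True
        sys.exit(1 if failed else 0)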
4 changes: 3 additions & 1 deletion benchmarks/vectorization_strategy_benchmark.py
@@ -72,7 +72,9 @@ def single_rectangle_mask(corners, mask_shape):
     return masks


-def fill_single_rectangle(image, centers_x, centers_y, widths, heights, fill_values):
+def fill_single_rectangle(
+    image, centers_x, centers_y, widths, heights, fill_values
+):
     """Fill rectangles with fill value into images.

     Args:
4 changes: 3 additions & 1 deletion benchmarks/vectorized_auto_contrast.py
@@ -79,7 +79,9 @@ def augment_bounding_boxes(self, bounding_boxes, **kwargs):
     def augment_label(self, label, transformation=None, **kwargs):
         return label

-    def augment_segmentation_mask(self, segmentation_mask, transformation, **kwargs):
+    def augment_segmentation_mask(
+        self, segmentation_mask, transformation, **kwargs
+    ):
         return segmentation_mask

     def get_config(self):
4 changes: 3 additions & 1 deletion benchmarks/vectorized_grayscale.py
@@ -90,7 +90,9 @@ def augment_bounding_boxes(self, bounding_boxes, **kwargs):
     def augment_label(self, label, transformation=None, **kwargs):
         return label

-    def augment_segmentation_mask(self, segmentation_mask, transformation, **kwargs):
+    def augment_segmentation_mask(
+        self, segmentation_mask, transformation, **kwargs
+    ):
         return segmentation_mask

     def get_config(self):
8 changes: 6 additions & 2 deletions benchmarks/vectorized_random_brightness.py
@@ -74,10 +74,14 @@ def augment_image(self, image, transformation, **kwargs):
     def augment_label(self, label, transformation, **kwargs):
         return label

-    def augment_segmentation_mask(self, segmentation_mask, transformation, **kwargs):
+    def augment_segmentation_mask(
+        self, segmentation_mask, transformation, **kwargs
+    ):
         return segmentation_mask

-    def augment_bounding_boxes(self, bounding_boxes, transformation=None, **kwargs):
+    def augment_bounding_boxes(
+        self, bounding_boxes, transformation=None, **kwargs
+    ):
         return bounding_boxes

     def get_random_transformation(self, **kwargs):
4 changes: 3 additions & 1 deletion benchmarks/vectorized_random_color_jitter.py
@@ -140,7 +140,9 @@ def augment_bounding_boxes(self, bounding_boxes, **kwargs):
     def augment_label(self, label, transformation=None, **kwargs):
         return label

-    def augment_segmentation_mask(self, segmentation_mask, transformation, **kwargs):
+    def augment_segmentation_mask(
+        self, segmentation_mask, transformation, **kwargs
+    ):
         return segmentation_mask

     def get_config(self):
12 changes: 9 additions & 3 deletions benchmarks/vectorized_random_contrast.py
@@ -76,18 +76,24 @@ def get_random_transformation(self, **kwargs):

     def augment_image(self, image, transformation, **kwargs):
         contrast_factor = transformation
-        output = tf.image.adjust_contrast(image, contrast_factor=contrast_factor)
+        output = tf.image.adjust_contrast(
+            image, contrast_factor=contrast_factor
+        )
         output = tf.clip_by_value(output, 0, 255)
         output.set_shape(image.shape)
         return output

     def augment_label(self, label, transformation, **kwargs):
         return label

-    def augment_segmentation_mask(self, segmentation_mask, transformation, **kwargs):
+    def augment_segmentation_mask(
+        self, segmentation_mask, transformation, **kwargs
+    ):
         return segmentation_mask

-    def augment_bounding_boxes(self, bounding_boxes, transformation=None, **kwargs):
+    def augment_bounding_boxes(
+        self, bounding_boxes, transformation=None, **kwargs
+    ):
         return bounding_boxes

     def get_config(self):
4 changes: 3 additions & 1 deletion benchmarks/vectorized_random_hue.py
@@ -86,7 +86,9 @@ def augment_bounding_boxes(self, bounding_boxes, **kwargs):
     def augment_label(self, label, transformation=None, **kwargs):
         return label

-    def augment_segmentation_mask(self, segmentation_mask, transformation, **kwargs):
+    def augment_segmentation_mask(
+        self, segmentation_mask, transformation, **kwargs
+    ):
         return segmentation_mask

     def get_config(self):
16 changes: 12 additions & 4 deletions benchmarks/vectorized_random_saturation.py
@@ -74,15 +74,21 @@ def augment_image(self, image, transformation=None, **kwargs):
         # it will be handled correctly when it is a one tensor.
         transformation = tf.convert_to_tensor(transformation)
         adjust_factor = transformation / (1 - transformation)
-        return tf.image.adjust_saturation(image, saturation_factor=adjust_factor)
+        return tf.image.adjust_saturation(
+            image, saturation_factor=adjust_factor
+        )

-    def augment_bounding_boxes(self, bounding_boxes, transformation=None, **kwargs):
+    def augment_bounding_boxes(
+        self, bounding_boxes, transformation=None, **kwargs
+    ):
         return bounding_boxes

     def augment_label(self, label, transformation=None, **kwargs):
         return label

-    def augment_segmentation_mask(self, segmentation_mask, transformation, **kwargs):
+    def augment_segmentation_mask(
+        self, segmentation_mask, transformation, **kwargs
+    ):
         return segmentation_mask

     def get_config(self):
@@ -96,7 +102,9 @@ def get_config(self):
     @classmethod
     def from_config(cls, config):
         if isinstance(config["factor"], dict):
-            config["factor"] = tf.keras.utils.deserialize_keras_object(config["factor"])
+            config["factor"] = tf.keras.utils.deserialize_keras_object(
+                config["factor"]
+            )
         return cls(**config)
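One substantive detail preserved by the re-wrap above: the benchmark layer parameterizes saturation as a factor in [0, 1) and converts it to the multiplier expected by tf.image.adjust_saturation via factor / (1 - factor), so 0.5 maps to 1.0 (identity). A standalone sketch of that mapping (illustration only, not KerasCV code):

    # Sketch: mapping the layer's [0, 1) factor to tf.image's multiplier.
    # Standalone illustration of the arithmetic above, not KerasCV code.
    import tensorflow as tf


    def to_adjust_factor(factor):
        factor = tf.convert_to_tensor(factor, dtype=tf.float32)
        return factor / (1.0 - factor)


    print(to_adjust_factor(0.5).numpy())   # 1.0  -> identity
    print(to_adjust_factor(0.25).numpy())  # ~0.33 -> desaturate
    print(to_adjust_factor(0.75).numpy())  # 3.0  -> oversaturate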
6 changes: 5 additions & 1 deletion build_deps/configure.py
@@ -158,7 +158,11 @@ def create_build_configuration():
     write("build:windows --host_cxxopt=/std:" + get_cpp_version())

     if is_macos() or is_linux():
-        if not is_linux_ppc64le() and not is_linux_arm() and not is_linux_aarch64():
+        if (
+            not is_linux_ppc64le()
+            and not is_linux_arm()
+            and not is_linux_aarch64()
+        ):
             write("build --copt=-mavx")
         write("build --cxxopt=-std=" + get_cpp_version())
         write("build --host_cxxopt=-std=" + get_cpp_version())
(file name not captured in this view)
@@ -48,5 +48,7 @@ def pair_with_anchor_boxes(inputs):

 if __name__ == "__main__":
     dataset = demo_utils.load_voc_dataset(bounding_box_format="xywh")
-    result = dataset.map(pair_with_anchor_boxes, num_parallel_calls=tf.data.AUTOTUNE)
+    result = dataset.map(
+        pair_with_anchor_boxes, num_parallel_calls=tf.data.AUTOTUNE
+    )
     demo_utils.visualize_data(result, bounding_box_format="xywh")
9 changes: 7 additions & 2 deletions examples/layers/object_detection/demo_utils.py
@@ -29,7 +29,10 @@ def preprocess_voc(inputs, format, image_size):
         source="rel_yxyx",
         target=format,
     )
-    return {"images": inputs["image"], "bounding_boxes": inputs["objects"]["bbox"]}
+    return {
+        "images": inputs["image"],
+        "bounding_boxes": inputs["objects"]["bbox"],
+    }


 def load_voc_dataset(
@@ -40,7 +43,9 @@ def load_voc_dataset(
 ):
     dataset = tfds.load(name, split=tfds.Split.TRAIN, shuffle_files=True)
     dataset = dataset.map(
-        lambda x: preprocess_voc(x, format=bounding_box_format, image_size=image_size),
+        lambda x: preprocess_voc(
+            x, format=bounding_box_format, image_size=image_size
+        ),
         num_parallel_calls=tf.data.AUTOTUNE,
     )
     dataset = dataset.padded_batch(
4 changes: 3 additions & 1 deletion examples/layers/preprocessing/bounding_box/demo_utils.py
@@ -45,7 +45,9 @@ def load_voc_dataset(
         lambda x: preprocess_voc(x, format=bounding_box_format),
         num_parallel_calls=tf.data.AUTOTUNE,
     )
-    dataset = dataset.apply(tf.data.experimental.dense_to_ragged_batch(batch_size))
+    dataset = dataset.apply(
+        tf.data.experimental.dense_to_ragged_batch(batch_size)
+    )
     return dataset
(file name not captured in this view)
@@ -19,7 +19,9 @@

 def main():
     augment = keras_cv.layers.JitteredResize(
-        target_size=(640, 640), scale_factor=(0.75, 1.3), bounding_box_format="xywh"
+        target_size=(640, 640),
+        scale_factor=(0.75, 1.3),
+        bounding_box_format="xywh",
     )
     dataset = demo_utils.load_voc_dataset(bounding_box_format="xywh")
     dataset = dataset.map(
@@ -28,7 +30,8 @@ def main():
     demo_utils.visualize_data(dataset, bounding_box_format="xywh")

     dataset = dataset.map(
-        lambda x: augment(x, training=False), num_parallel_calls=tf.data.AUTOTUNE
+        lambda x: augment(x, training=False),
+        num_parallel_calls=tf.data.AUTOTUNE,
     )
     demo_utils.visualize_data(dataset, bounding_box_format="xywh")
5 changes: 4 additions & 1 deletion examples/layers/preprocessing/bounding_box/resizing_demo.py
@@ -23,7 +23,10 @@
 def main():
     dataset = demo_utils.load_voc_dataset(bounding_box_format="xywh")
     resizing = layers.Resizing(
-        height=300, width=400, pad_to_aspect_ratio=True, bounding_box_format="xywh"
+        height=300,
+        width=400,
+        pad_to_aspect_ratio=True,
+        bounding_box_format="xywh",
     )
     dataset = dataset.map(resizing)
     demo_utils.visualize_data(dataset, bounding_box_format="xywh")
(file name not captured in this view)
@@ -26,7 +26,10 @@
 def main():
     ds = demo_utils.load_oxford_dataset()
     rand_augment = preprocessing.RandAugment(
-        value_range=(0, 255), augmentations_per_image=3, magnitude=0.5, rate=0.875
+        value_range=(0, 255),
+        augmentations_per_image=3,
+        magnitude=0.5,
+        rate=0.875,
     )
     ds = ds.map(rand_augment, num_parallel_calls=tf.data.AUTOTUNE)
     demo_utils.visualize_dataset(ds)
(file name not captured in this view)
@@ -27,7 +27,9 @@ def create_custom_pipeline():
     layers = preprocessing.RandAugment.get_standard_policy(
         value_range=(0, 255), magnitude=0.75, magnitude_stddev=0.3
     )
-    layers = layers[:4] # slice out some layers you don't want for whatever reason
+    layers = layers[
+        :4
+    ] # slice out some layers you don't want for whatever reason
     layers = layers + [preprocessing.GridMask()]
     return preprocessing.RandomAugmentationPipeline(
         layers=layers, augmentations_per_image=3
(file name not captured in this view)
@@ -27,7 +27,9 @@

 def main():
     ds = demo_utils.load_oxford_dataset()
-    rgbshift = preprocessing.RandomChannelShift(value_range=(0, 255), factor=0.4)
+    rgbshift = preprocessing.RandomChannelShift(
+        value_range=(0, 255), factor=0.4
+    )
     ds = ds.map(rgbshift, num_parallel_calls=tf.data.AUTOTUNE)
     demo_utils.visualize_dataset(ds)
(file name not captured in this view)
@@ -25,7 +25,9 @@

 def main():
     ds = demo_utils.load_oxford_dataset()
-    random_color_degeneration = preprocessing.RandomColorDegeneration(factor=(0, 1.0))
+    random_color_degeneration = preprocessing.RandomColorDegeneration(
+        factor=(0, 1.0)
+    )
     ds = ds.map(random_color_degeneration, num_parallel_calls=tf.data.AUTOTUNE)
     demo_utils.visualize_dataset(ds)
(file name not captured in this view)
@@ -24,7 +24,9 @@

 def main():
     ds = demo_utils.load_oxford_dataset()
-    random_hue = preprocessing.RandomHue(factor=(0.0, 1.0), value_range=(0, 255))
+    random_hue = preprocessing.RandomHue(
+        factor=(0.0, 1.0), value_range=(0, 255)
+    )
     ds = ds.map(random_hue, num_parallel_calls=tf.data.AUTOTUNE)
     demo_utils.visualize_dataset(ds)
4 changes: 3 additions & 1 deletion examples/models/generative/stable_diffusion/text_to_image.py
@@ -11,6 +11,8 @@
 from keras_cv.models import StableDiffusion

 model = StableDiffusion(img_height=512, img_width=512, jit_compile=True)
-img = model.text_to_image("Photograph of a beautiful horse running through a field")
+img = model.text_to_image(
+    "Photograph of a beautiful horse running through a field"
+)
 Image.fromarray(img[0]).save("horse.png")
 print("Saved at horse.png")
41 changes: 32 additions & 9 deletions examples/training/classification/imagenet/basic_training.py
@@ -52,15 +52,21 @@
 flags.DEFINE_string(
     "model_name", None, "The name of the model in KerasCV.models to use."
 )
-flags.DEFINE_string("imagenet_path", None, "Directory from which to load Imagenet.")
+flags.DEFINE_string(
+    "imagenet_path", None, "Directory from which to load Imagenet."
+)
 flags.DEFINE_string(
     "backup_path", None, "Directory which will be used for training backups."
 )
 flags.DEFINE_string(
-    "weights_path", None, "Directory which will be used to store weight checkpoints."
+    "weights_path",
+    None,
+    "Directory which will be used to store weight checkpoints.",
 )
 flags.DEFINE_string(
-    "tensorboard_path", None, "Directory which will be used to store tensorboard logs."
+    "tensorboard_path",
+    None,
+    "Directory which will be used to store tensorboard logs.",
 )
 flags.DEFINE_integer(
     "batch_size",
@@ -150,7 +156,9 @@
 print("Number of accelerators: ", strategy.num_replicas_in_sync)

 BATCH_SIZE = FLAGS.batch_size * strategy.num_replicas_in_sync
-INITIAL_LEARNING_RATE = FLAGS.initial_learning_rate * strategy.num_replicas_in_sync
+INITIAL_LEARNING_RATE = (
+    FLAGS.initial_learning_rate * strategy.num_replicas_in_sync
+)
 """TFRecord-based tf.data.Dataset loads lazily so we can't get the length of the dataset. Temporary."""
 NUM_IMAGES = 1281167

@@ -245,7 +253,12 @@ def augment(img, label):


 def lr_warmup_cosine_decay(
-    global_step, warmup_steps, hold=0, total_steps=0, start_lr=0.0, target_lr=1e-2
+    global_step,
+    warmup_steps,
+    hold=0,
+    total_steps=0,
+    start_lr=0.0,
+    target_lr=1e-2,
 ):
     # Cosine decay
     learning_rate = (
@@ -269,7 +282,9 @@ def lr_warmup_cosine_decay(
         global_step > warmup_steps + hold, learning_rate, target_lr
     )

-    learning_rate = tf.where(global_step < warmup_steps, warmup_lr, learning_rate)
+    learning_rate = tf.where(
+        global_step < warmup_steps, warmup_lr, learning_rate
+    )
     return learning_rate


@@ -292,7 +307,9 @@ def lr_warmup_cosine_decay(


 class WarmUpCosineDecay(keras.optimizers.schedules.LearningRateSchedule):
-    def __init__(self, warmup_steps, total_steps, hold, start_lr=0.0, target_lr=1e-2):
+    def __init__(
+        self, warmup_steps, total_steps, hold, start_lr=0.0, target_lr=1e-2
+    ):
         super().__init__()
         self.start_lr = start_lr
         self.target_lr = target_lr
@@ -371,13 +388,19 @@ def __call__(self, step):
     callbacks.ModelCheckpoint(
         FLAGS.weights_path, save_weights_only=True, save_best_only=True
     ),
-    callbacks.TensorBoard(log_dir=FLAGS.tensorboard_path, write_steps_per_second=True),
+    callbacks.TensorBoard(
+        log_dir=FLAGS.tensorboard_path, write_steps_per_second=True
+    ),
 ]

 if FLAGS.learning_rate_schedule == REDUCE_ON_PLATEAU:
     model_callbacks.append(
         callbacks.ReduceLROnPlateau(
-            monitor="val_loss", factor=0.1, patience=10, min_delta=0.001, min_lr=0.0001
+            monitor="val_loss",
+            factor=0.1,
+            patience=10,
+            min_delta=0.001,
+            min_lr=0.0001,
         )
     )
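The hunks above show only the edges of lr_warmup_cosine_decay. As a reading aid, here is a hedged standalone reconstruction of the warmup, hold, and cosine-decay phases the fragments imply; the elided middle of the function is filled with the textbook formula and may differ from the file's exact code:

    # Standalone sketch of the warmup -> hold -> cosine-decay schedule.
    # Only the signature and the two tf.where branches appear in the diff;
    # the cosine and warmup formulas below are assumed, not copied.
    import math


    def lr_warmup_cosine_decay_sketch(
        global_step,
        warmup_steps,
        hold=0,
        total_steps=0,
        start_lr=0.0,
        target_lr=1e-2,
    ):
        # Cosine decay from target_lr toward 0 after warmup + hold.
        progress = (global_step - warmup_steps - hold) / max(
            1, total_steps - warmup_steps - hold
        )
        progress = min(max(progress, 0.0), 1.0)
        learning_rate = 0.5 * target_lr * (1 + math.cos(math.pi * progress))

        # Linear warmup from start_lr to target_lr.
        warmup_lr = start_lr + (target_lr - start_lr) * (
            global_step / max(1, warmup_steps)
        )

        # Mirror the two tf.where branches shown in the diff.
        if global_step <= warmup_steps + hold:
            learning_rate = target_lr
        if global_step < warmup_steps:
            learning_rate = warmup_lr
        return learning_rate


    # Quick check: warmup ends at target_lr, decay ends near 0.
    print(lr_warmup_cosine_decay_sketch(0, 100, hold=50, total_steps=1000))
    print(lr_warmup_cosine_decay_sketch(100, 100, hold=50, total_steps=1000))
    print(lr_warmup_cosine_decay_sketch(1000, 100, hold=50, total_steps=1000))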