From 18d3f0bb7988f6445ec6c61b63061335adb4fa73 Mon Sep 17 00:00:00 2001
From: Alexander Jung
Date: Sun, 3 May 2020 14:58:56 +0200
Subject: [PATCH] Improve Performance of ElasticTransformation (#624)

This patch applies various performance-related changes to
`ElasticTransformation`. These cover: (a) the re-use of generated
random samples for multiple images in the same batch (with some
adjustments so that they are not identical), (b) the caching of
generated and re-usable arrays, (c) a performance-optimized smoothing
method for the underlying displacement maps and (d) the use of nearest
neighbour interpolation (`order=0`) instead of cubic interpolation
(`order=3`) as the new default parameter for `order`.

These changes lead to a speedup of about 3x to 4x (more for larger
images) at a slight loss of visual quality (mainly from `order=0`) and
variety (due to the re-use of random samples within each batch).
The new smoothing method leads to slightly stronger displacements for
larger `sigma` values.
---
 .../improved/20200223_faster_elastic_tf.md |  19 ++
 checks/check_elastic_transformation.py     |   7 +
 checks/check_segmentation_maps.py          |  15 ++
 imgaug/augmenters/geometric.py             | 248 ++++++++++++------
 test/augmenters/test_geometric.py          | 131 ++++-----
 test/augmenters/test_meta.py               |   2 +-
 test/augmenters/test_mixed_files.py        |   2 +-
 7 files changed, 285 insertions(+), 139 deletions(-)
 create mode 100644 changelogs/master/improved/20200223_faster_elastic_tf.md

diff --git a/changelogs/master/improved/20200223_faster_elastic_tf.md b/changelogs/master/improved/20200223_faster_elastic_tf.md
new file mode 100644
index 000000000..782bdbcad
--- /dev/null
+++ b/changelogs/master/improved/20200223_faster_elastic_tf.md
@@ -0,0 +1,19 @@
+# Improved Performance of `ElasticTransformation` #624
+
+This patch applies various performance-related changes to
+`ElasticTransformation`. These cover: (a) the re-use of
+generated random samples for multiple images in the same
+batch (with some adjustments so that they are not identical),
+(b) the caching of generated and re-usable arrays,
+(c) a performance-optimized smoothing method for the
+underlying displacement maps and (d) the use of nearest
+neighbour interpolation (`order=0`) instead of cubic
+interpolation (`order=3`) as the new default parameter
+for `order`.
+
+These changes lead to a speedup of about 3x to 4x (more
+for larger images) at a slight loss of visual
+quality (mainly from `order=0`) and variety (due to the
+re-use of random samples within each batch).
+The new smoothing method leads to slightly stronger
+displacements for larger `sigma` values.
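For context, a minimal usage sketch of the new defaults described above. The parameter values are taken from the diff below; the explicit `order=3` call is only an assumption about how a user could restore the previous interpolation quality at the cost of the old runtime.

```python
import numpy as np
import imgaug.augmenters as iaa

# New defaults after this patch: alpha=(1.0, 40.0), sigma=(4.0, 8.0), order=0
# (nearest neighbour remapping instead of cubic interpolation).
aug_fast = iaa.ElasticTransformation()

# Users who prefer the previous visual quality can still request cubic
# interpolation explicitly, trading speed for smoothness.
aug_cubic = iaa.ElasticTransformation(alpha=(1.0, 40.0), sigma=(4.0, 8.0),
                                      order=3)

images = np.random.randint(0, 255, size=(8, 128, 128, 3), dtype=np.uint8)
images_aug = aug_fast(images=images)
```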
diff --git a/checks/check_elastic_transformation.py b/checks/check_elastic_transformation.py index 3c67d90c5..09cfc513d 100644 --- a/checks/check_elastic_transformation.py +++ b/checks/check_elastic_transformation.py @@ -25,6 +25,13 @@ def main(): augs_cv2 = aug_cv2.augment_images([image] * 8) ia.imshow(ia.draw_grid(augs_scipy + augs_cv2, rows=2)) + # check behaviour for multiple consecutive batches + aug = iaa.ElasticTransformation(alpha=(5, 100), sigma=(3, 5)) + images1 = aug(images=[np.copy(image) for _ in range(10)]) + images2 = aug(images=[np.copy(image) for _ in range(10)]) + images3 = aug(images=[np.copy(image) for _ in range(10)]) + ia.imshow(ia.draw_grid(images1 + images2 + images3, rows=3)) + print("alpha=vary, sigma=0.25") augs = [iaa.ElasticTransformation(alpha=alpha, sigma=0.25) for alpha in np.arange(0.0, 50.0, 0.1)] images_aug = [aug.augment_image(image) for aug in augs] diff --git a/checks/check_segmentation_maps.py b/checks/check_segmentation_maps.py index aa1a8ddc0..3d9742111 100644 --- a/checks/check_segmentation_maps.py +++ b/checks/check_segmentation_maps.py @@ -106,6 +106,21 @@ def main(): ]) ) + print("ElasticTransformation alpha=200, sig=20...") + aug = iaa.ElasticTransformation(alpha=200.0, sigma=20.0) + aug_det = aug.to_deterministic() + quokka_aug = aug_det.augment_image(quokka) + segmaps_aug = aug_det.augment_segmentation_maps(segmap) + segmaps_drawn = segmap.draw_on_image(quokka)[0] + segmaps_aug_drawn = segmaps_aug.draw_on_image(quokka_aug)[0] + + ia.imshow( + np.hstack([ + segmaps_drawn, + segmaps_aug_drawn + ]) + ) + print("CopAndPad mode=constant...") aug = iaa.CropAndPad(px=(-10, 10, 15, -15), pad_mode="constant", pad_cval=128) aug_det = aug.to_deterministic() diff --git a/imgaug/augmenters/geometric.py b/imgaug/augmenters/geometric.py index 8b5c935e9..ba7dec322 100644 --- a/imgaug/augmenters/geometric.py +++ b/imgaug/augmenters/geometric.py @@ -23,6 +23,7 @@ import math import functools +import itertools import numpy as np from scipy import ndimage @@ -4112,16 +4113,6 @@ def get_parameters(self): self.fit_output] -class _ElasticTransformationSamplingResult(object): - def __init__(self, random_states, alphas, sigmas, orders, cvals, modes): - self.random_states = random_states - self.alphas = alphas - self.sigmas = sigmas - self.orders = orders - self.cvals = cvals - self.modes = modes - - # TODO add independent sigmas for x/y # TODO add independent alphas for x/y # TODO add backend arg @@ -4188,8 +4179,9 @@ class ElasticTransformation(meta.Augmenter): ---------- alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional Strength of the distortion field. Higher values mean that pixels are - moved further with respect to the distortion field's direction. Set - this to around 10 times the value of `sigma` for visible effects. + moved further with respect to the distortion field's direction. + Should be a value from interval ``[1.0, inf]``. Set this to around + ``10 * sigma`` for visible effects. * If number, then that value will be used for all images. * If tuple ``(a, b)``, then a random value will be uniformly @@ -4200,8 +4192,11 @@ class ElasticTransformation(meta.Augmenter): sample a value per image. sigma : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional - Standard deviation of the gaussian kernel used to smooth the distortion - fields. 
Higher values (for ``128x128`` images around 5.0) lead to more + Corresponds to the standard deviation of the gaussian kernel used + in the original algorithm. Here, for performance reasons, it denotes + half of an average blur kernel size. (Only for ``sigma<1.5`` is + a gaussian kernel actually used.) + Higher values (for ``128x128`` images around 5.0) lead to more water-like effects, while lower values (for ``128x128`` images around ``1.0`` and lower) lead to more noisy, pixelated images. Set this to around 1/10th of `alpha` for visible effects. @@ -4327,7 +4322,7 @@ class ElasticTransformation(meta.Augmenter): 5: cv2.INTER_CUBIC } - def __init__(self, alpha=(0.0, 40.0), sigma=(4.0, 8.0), order=3, cval=0, + def __init__(self, alpha=(1.0, 40.0), sigma=(4.0, 8.0), order=0, cval=0, mode="constant", polygon_recoverer="auto", seed=None, name=None, @@ -4364,6 +4359,8 @@ def __init__(self, alpha=(0.0, 40.0), sigma=(4.0, 8.0), order=3, cval=0, self._cval_heatmaps = 0.0 self._cval_segmentation_maps = 0 + self._last_meshgrid = None + @classmethod def _handle_order_arg(cls, order): if order == ia.ALL: @@ -4398,7 +4395,7 @@ def _draw_samples(self, nb_images, random_state): cvals = self.cval.draw_samples((nb_images,), random_state=rss[-2]) modes = self.mode.draw_samples((nb_images,), random_state=rss[-1]) return _ElasticTransformationSamplingResult( - rss[0:-5], alphas, sigmas, orders, cvals, modes) + rss[0], alphas, sigmas, orders, cvals, modes) # Added in 0.4.0. def _augment_batch_(self, batch, random_state, parents, hooks): @@ -4417,14 +4414,11 @@ def _augment_batch_(self, batch, random_state, parents, hooks): shapes = batch.get_rowwise_shapes() samples = self._draw_samples(len(shapes), random_state) + smgen = _ElasticTfShiftMapGenerator() + shift_maps = smgen.generate(shapes, samples.alphas, samples.sigmas, + samples.random_state) - for i, shape in enumerate(shapes): - dx, dy = self._generate_shift_maps( - shape[0:2], - alpha=samples.alphas[i], - sigma=samples.sigmas[i], - random_state=samples.random_states[i]) - + for i, (shape, (dx, dy)) in enumerate(zip(shapes, shift_maps)): if batch.images is not None: batch.images[i] = self._augment_image_by_samples( batch.images[i], i, samples, dx, dy) @@ -4617,40 +4611,7 @@ def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.alpha, self.sigma, self.order, self.cval, self.mode] - @classmethod - def _generate_shift_maps(cls, shape, alpha, sigma, random_state): - # pylint: disable=protected-access, invalid-name - assert len(shape) == 2, ("Expected 2d shape, got %s." % (shape,)) - - ksize = blur_lib._compute_gaussian_blur_ksize(sigma) - ksize = ksize + 1 if ksize % 2 == 0 else ksize - - padding = ksize - h, w = shape[0:2] - h_pad = h + 2*padding - w_pad = w + 2*padding - - # The step of random number generation could be batched, so that - # random numbers are sampled once for the whole batch. Would get rid - # of creating many random_states. - dxdy_unsmoothed = random_state.random((2 * h_pad, w_pad)) * 2 - 1 - - dx_unsmoothed = dxdy_unsmoothed[0:h_pad, :] - dy_unsmoothed = dxdy_unsmoothed[h_pad:, :] - - # TODO could this also work with an average blur? 
would probably be - # faster - dx = blur_lib.blur_gaussian_(dx_unsmoothed, sigma) * alpha - dy = blur_lib.blur_gaussian_(dy_unsmoothed, sigma) * alpha - - if padding > 0: - dx = dx[padding:-padding, padding:-padding] - dy = dy[padding:-padding, padding:-padding] - - return dx, dy - - @classmethod - def _map_coordinates(cls, image, dx, dy, order=1, cval=0, mode="constant"): + def _map_coordinates(self, image, dx, dy, order=1, cval=0, mode="constant"): """Remap pixels in an image according to x/y shift maps. **Supported dtypes**: @@ -4818,16 +4779,23 @@ def _map_coordinates(cls, image, dx, dy, order=1, cval=0, mode="constant"): assert image.ndim == 3, ( "Expected 3-dimensional image, got %d dimensions." % (image.ndim,)) - result = np.copy(image) - height, width = image.shape[0:2] - if backend == "scipy": - h, w = image.shape[0:2] + + h, w, nb_channels = image.shape + last = self._last_meshgrid + if last is not None and last[0].shape == (h, w): + y, x = self._last_meshgrid + else: y, x = np.meshgrid( np.arange(h).astype(np.float32), np.arange(w).astype(np.float32), - indexing="ij") - x_shifted = x + (-1) * dx - y_shifted = y + (-1) * dy + indexing="ij" + ) + self._last_meshgrid = (y, x) + x_shifted = x - dx + y_shifted = y - dy + + if backend == "scipy": + result = np.empty_like(image) for c in sm.xrange(image.shape[2]): remapped_flat = ndimage.interpolation.map_coordinates( @@ -4837,24 +4805,16 @@ def _map_coordinates(cls, image, dx, dy, order=1, cval=0, mode="constant"): cval=cval, mode=mode ) - remapped = remapped_flat.reshape((height, width)) + remapped = remapped_flat.reshape((h, w)) result[..., c] = remapped else: - h, w, nb_channels = image.shape - - y, x = np.meshgrid( - np.arange(h).astype(np.float32), - np.arange(w).astype(np.float32), - indexing="ij") - x_shifted = x + (-1) * dx - y_shifted = y + (-1) * dy - if image.dtype.kind == "f": cval = float(cval) else: cval = int(cval) - border_mode = cls._MAPPING_MODE_SCIPY_CV2[mode] - interpolation = cls._MAPPING_ORDER_SCIPY_CV2[order] + + border_mode = self._MAPPING_MODE_SCIPY_CV2[mode] + interpolation = self._MAPPING_ORDER_SCIPY_CV2[order] is_nearest_neighbour = (interpolation == cv2.INTER_NEAREST) map1, map2 = cv2.convertMaps( @@ -4862,10 +4822,15 @@ def _map_coordinates(cls, image, dx, dy, order=1, cval=0, mode="constant"): nninterpolation=is_nearest_neighbour) # remap only supports up to 4 channels if nb_channels <= 4: + # dst does not seem to improve performance here result = cv2.remap( _normalize_cv2_input_arr_(image), - map1, map2, interpolation=interpolation, - borderMode=border_mode, borderValue=(cval, cval, cval)) + map1, + map2, + interpolation=interpolation, + borderMode=border_mode, + borderValue=tuple([cval] * nb_channels) + ) if image.ndim == 3 and result.ndim == 2: result = result[..., np.newaxis] else: @@ -4889,6 +4854,133 @@ def _map_coordinates(cls, image, dx, dy, order=1, cval=0, mode="constant"): return result +class _ElasticTransformationSamplingResult(object): + def __init__(self, random_state, alphas, sigmas, orders, cvals, modes): + self.random_state = random_state + self.alphas = alphas + self.sigmas = sigmas + self.orders = orders + self.cvals = cvals + self.modes = modes + + +class _ElasticTfShiftMapGenerator(object): + """Class to generate shift/displacement maps for ElasticTransformation. + + This class re-uses samples for multiple examples. This minimizes the amount + of sampling that has to be done. + + Added in 0.5.0. 
+ + """ + + # Not really necessary to have this as a class, considering it has no + # attributes. But it makes things easier to read. + # Added in 0.5.0. + def __init__(self): + pass + + # Added in 0.5.0. + def generate(self, shapes, alphas, sigmas, random_state): + # We will sample shift maps from [0.0, 1.0] and then shift by -0.5 to + # [-0.5, 0.5]. To bring these maps to [-1.0, 1.0], we have to multiply + # somewhere by 2. It is fastes to multiply the (fewer) alphas, which + # we will have to multiply the shift maps with anyways. + alphas *= 2 + + # Configuration for each chunk. + # switch dx / dy, flip dx lr, flip dx ud, flip dy lr, flip dy ud + switch = [False, True] + fliplr_dx = [False, True] + flipud_dx = [False, True] + fliplr_dy = [False, True] + flipud_dy = [False, True] + configs = list( + itertools.product( + switch, fliplr_dx, flipud_dx, fliplr_dy, flipud_dy + ) + ) + + areas = [shape[0] * shape[1] for shape in shapes] + nb_chunks = len(configs) + gen = zip( + self._split_chunks(shapes, nb_chunks), + self._split_chunks(areas, nb_chunks), + self._split_chunks(alphas, nb_chunks), + self._split_chunks(sigmas, nb_chunks) + ) + # "_c" denotes a chunk here + for shapes_c, areas_c, alphas_c, sigmas_c in gen: + area_max = max(areas_c) + + dxdy = random_state.random((2, area_max)) + dxdy -= 0.5 + dx, dy = dxdy[0, :], dxdy[1, :] + + # dx_lr = flip dx left-right, dx_ud = flip dx up-down + # dy_lr, dy_ud analogous + for i, (switch_i, dx_lr, dx_ud, dy_lr, dy_ud) in enumerate(configs): + if i >= len(shapes_c): + break + + dx_i, dy_i = (dx, dy) if not switch_i else (dy, dx) + shape_i = shapes_c[i][0:2] + area_i = shape_i[0] * shape_i[1] + + if area_i == 0: + yield ( + np.zeros(shape_i, dtype=np.float32), + np.zeros(shape_i, dtype=np.float32) + ) + else: + dx_i = dx_i[0:area_i].reshape(shape_i) + dy_i = dy_i[0:area_i].reshape(shape_i) + dx_i, dy_i = self._flip(dx_i, dy_i, + (dx_lr, dx_ud, dy_lr, dy_ud)) + dx_i, dy_i = self._mul_alpha(dx_i, dy_i, alphas_c[i]) + yield self._smoothen_(dx_i, dy_i, sigmas_c[i]) + + # Added in 0.5.0. + @classmethod + def _flip(cls, dx, dy, flips): + # no measureable benefit from using cv2 here + if flips[0]: + dx = np.fliplr(dx) + if flips[1]: + dx = np.flipud(dx) + if flips[2]: + dy = np.fliplr(dy) + if flips[3]: + dy = np.flipud(dy) + return dx, dy + + # Added in 0.5.0. + @classmethod + def _mul_alpha(cls, dx, dy, alpha): + # performance drops for cv2.multiply here + dx = dx * alpha + dy = dy * alpha + return dx, dy + + # Added in 0.5.0. + @classmethod + def _smoothen_(cls, dx, dy, sigma): + if sigma < 1.5: + dx = blur_lib.blur_gaussian_(dx, sigma) + dy = blur_lib.blur_gaussian_(dy, sigma) + else: + ksize = int(round(2*sigma)) + dx = cv2.blur(dx, (ksize, ksize), dst=dx) + dy = cv2.blur(dy, (ksize, ksize), dst=dy) + return dx, dy + + # Added in 0.5.0. + @classmethod + def _split_chunks(cls, iterable, chunk_size): + for i in sm.xrange(0, len(iterable), chunk_size): + yield iterable[i:i+chunk_size] + + class Rot90(meta.Augmenter): """ Rotate images clockwise by multiples of 90 degrees. 
diff --git a/test/augmenters/test_geometric.py b/test/augmenters/test_geometric.py index d1709aae2..e2208817f 100644 --- a/test/augmenters/test_geometric.py +++ b/test/augmenters/test_geometric.py @@ -6836,7 +6836,7 @@ def test___init___bad_datatype_for_mode_leads_to_failure(self): # ----------- def test_images(self): # test basic funtionality - aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25) + aug = iaa.ElasticTransformation(alpha=5, sigma=0.25) observed = aug.augment_image(self.image) @@ -6850,7 +6850,7 @@ def test_images(self): def test_images_nonsquare(self): # test basic funtionality with non-square images - aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25) + aug = iaa.ElasticTransformation(alpha=2.0, sigma=0.25, order=3) img_nonsquare = np.zeros((50, 100), dtype=np.uint8) + 255 img_nonsquare = np.pad(img_nonsquare, ((100, 100), (100, 100)), mode="constant", constant_values=0) @@ -7010,26 +7010,29 @@ def test_images_alpha_is_stochastic_parameter(self): def test_sigma_is_stochastic_parameter(self): # test sigma being iap.Choice - aug = iaa.ElasticTransformation(alpha=3.0, - sigma=iap.Choice([0.01, 5.0])) - seen = [0, 0] - for _ in sm.xrange(100): - observed = aug.augment_image(self.image) - - observed_std_hori = np.std( - observed.astype(np.float32)[:, 1:] - - observed.astype(np.float32)[:, :-1]) - observed_std_vert = np.std( - observed.astype(np.float32)[1:, :] - - observed.astype(np.float32)[:-1, :]) - observed_std = (observed_std_hori + observed_std_vert) / 2 - - if observed_std > 10.0: - seen[0] += 1 - else: - seen[1] += 1 - assert seen[0] > 10 - assert seen[1] > 10 + for order in [0, 1, 3]: + with self.subTest(order=order): + aug = iaa.ElasticTransformation(alpha=50.0, + sigma=iap.Choice([0.001, 5.0]), + order=order) + seen = [0, 0] + for _ in sm.xrange(100): + observed = aug.augment_image(self.image) + + observed_std_hori = np.std( + observed.astype(np.float32)[:, 1:] + - observed.astype(np.float32)[:, :-1]) + observed_std_vert = np.std( + observed.astype(np.float32)[1:, :] + - observed.astype(np.float32)[:-1, :]) + observed_std = (observed_std_hori + observed_std_vert) / 2 + + if observed_std > 25.0: + seen[0] += 1 + else: + seen[1] += 1 + assert seen[0] > 10 + assert seen[1] > 10 # ----------- # cval @@ -7491,50 +7494,60 @@ def test_empty_bounding_boxes(self): # ----------- def test_image_heatmaps_alignment(self): # test alignment between images and heatmaps - img = np.zeros((80, 80), dtype=np.uint8) - img[:, 30:50] = 255 - img[30:50, :] = 255 - hm = HeatmapsOnImage(img.astype(np.float32)/255.0, shape=(80, 80)) - aug = iaa.ElasticTransformation(alpha=60.0, sigma=4.0, mode="constant", - cval=0) - aug_det = aug.to_deterministic() + for order in [0, 1, 3]: + with self.subTest(order=order): + img = np.zeros((80, 80), dtype=np.uint8) + img[:, 30:50] = 255 + img[30:50, :] = 255 + hm = HeatmapsOnImage(img.astype(np.float32)/255.0, shape=(80, 80)) + aug = iaa.ElasticTransformation( + alpha=60.0, + sigma=4.0, + mode="constant", + cval=0, + order=order + ) + aug_det = aug.to_deterministic() - img_aug = aug_det.augment_image(img) - hm_aug = aug_det.augment_heatmaps([hm])[0] + img_aug = aug_det.augment_image(img) + hm_aug = aug_det.augment_heatmaps([hm])[0] - img_aug_mask = img_aug > 255*0.1 - hm_aug_mask = hm_aug.arr_0to1 > 0.1 - same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0]) - assert hm_aug.shape == (80, 80) - assert hm_aug.arr_0to1.shape == (80, 80, 1) - assert (same / img_aug_mask.size) >= 0.99 + img_aug_mask = img_aug > 255*0.1 + hm_aug_mask = hm_aug.arr_0to1 > 0.1 + 
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0]) + assert hm_aug.shape == (80, 80) + assert hm_aug.arr_0to1.shape == (80, 80, 1) + assert (same / img_aug_mask.size) >= 0.97 def test_image_heatmaps_alignment_if_heatmaps_smaller_than_image(self): # test alignment between images and heatmaps # here with heatmaps that are smaller than the image - img = np.zeros((80, 80), dtype=np.uint8) - img[:, 30:50] = 255 - img[30:50, :] = 255 - img_small = ia.imresize_single_image( - img, (40, 40), interpolation="nearest") - hm = HeatmapsOnImage( - img_small.astype(np.float32)/255.0, - shape=(80, 80)) - aug = iaa.ElasticTransformation( - alpha=60.0, sigma=4.0, mode="constant", cval=0) - aug_det = aug.to_deterministic() + for order in [0, 1, 3]: + with self.subTest(order=order): + img = np.zeros((80, 80), dtype=np.uint8) + img[:, 30:50] = 255 + img[30:50, :] = 255 + img_small = ia.imresize_single_image( + img, (40, 40), interpolation="nearest") + hm = HeatmapsOnImage( + img_small.astype(np.float32)/255.0, + shape=(80, 80)) + aug = iaa.ElasticTransformation( + alpha=60.0, sigma=4.0, mode="constant", cval=0) + aug_det = aug.to_deterministic() - img_aug = aug_det.augment_image(img) - hm_aug = aug_det.augment_heatmaps([hm])[0] + img_aug = aug_det.augment_image(img) + hm_aug = aug_det.augment_heatmaps([hm])[0] - img_aug_mask = img_aug > 255*0.1 - hm_aug_mask = ia.imresize_single_image( - hm_aug.arr_0to1, (80, 80), interpolation="nearest" - ) > 0.1 - same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0]) - assert hm_aug.shape == (80, 80) - assert hm_aug.arr_0to1.shape == (40, 40, 1) - assert (same / img_aug_mask.size) >= 0.94 + img_aug_mask = img_aug > 255*0.1 + hm_aug_mask = ia.imresize_single_image( + hm_aug.arr_0to1, (80, 80), interpolation="nearest" + ) > 0.1 + same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0]) + assert hm_aug.shape == (80, 80) + assert hm_aug.arr_0to1.shape == (40, 40, 1) + # TODO this is a fairly low threshold, why is that the case? + assert (same / img_aug_mask.size) >= 0.9 # ----------- # segmaps alignment @@ -7584,7 +7597,7 @@ def test_image_segmaps_alignment_if_heatmaps_smaller_than_image(self): same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0]) assert segmaps_aug.shape == (80, 80) assert segmaps_aug.arr.shape == (40, 40, 1) - assert (same / img_aug_mask.size) >= 0.94 + assert (same / img_aug_mask.size) >= 0.93 # --------- # unusual channel numbers diff --git a/test/augmenters/test_meta.py b/test/augmenters/test_meta.py index 3a7159066..22dbdb854 100644 --- a/test/augmenters/test_meta.py +++ b/test/augmenters/test_meta.py @@ -2966,7 +2966,7 @@ def test_augment_batches_with_many_different_augmenters(self): mode=ia.ALL, cval=(0, 255)), iaa.PiecewiseAffine(scale=(0.1, 0.3)), - iaa.ElasticTransformation(alpha=0.5) + iaa.ElasticTransformation(alpha=2.0) ] nb_iterations = 100 diff --git a/test/augmenters/test_mixed_files.py b/test/augmenters/test_mixed_files.py index 2af780a1d..ba9fd5cfb 100644 --- a/test/augmenters/test_mixed_files.py +++ b/test/augmenters/test_mixed_files.py @@ -73,7 +73,7 @@ def test_determinism(): rotate=(-20, 20), shear=(-20, 20), order=ia.ALL, mode=ia.ALL, cval=(0, 255)), iaa.PiecewiseAffine(scale=(0.1, 0.3)), - iaa.ElasticTransformation(alpha=0.5) + iaa.ElasticTransformation(alpha=10.0) ] augs_affect_geometry = [
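Finally, a rough timing sketch for checking the claimed speedup locally. This is illustrative only: absolute numbers depend on hardware and image size, and the ~3x to 4x figure in the commit message compares against the previous defaults, of which `order=3` is only one part.

```python
import timeit
import numpy as np
import imgaug.augmenters as iaa

images = np.random.randint(0, 255, size=(16, 224, 224, 3), dtype=np.uint8)

for order in [0, 3]:
    aug = iaa.ElasticTransformation(alpha=(1.0, 40.0), sigma=(4.0, 8.0),
                                    order=order)
    secs = timeit.timeit(lambda: aug(images=images), number=10)
    print("order=%d: %.3fs for 10 batches of 16 images" % (order, secs))
```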