Update opencv and numpy (chaiNNer-org#2147)
* Update opencv and numpy to latest

* downgrade numpy slightly

* pyright fixes

* i hate python i hate python i hate python i hate python

* fix lots of ignored types

* fix ignored blend types

* you win some, you lose some

* ....

* lint

* ivdhfbundfvkhnjdkfvjn

* oh i see why this fails in CI but not locally, lol

* Update backend/src/nodes/impl/blend.py

* Update backend/src/nodes/impl/blend.py
joeyballentine authored Aug 28, 2023
1 parent d3f3e49 commit 74578fc
Showing 23 changed files with 65 additions and 61 deletions.
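Most of the changes below follow from the two dependency bumps in backend/src/packages/chaiNNer_standard/__init__.py (numpy 1.23.2 -> 1.24.4, opencv-python 4.7.0.68 -> 4.8.0.76): the updated OpenCV stubs apparently report different types to pyright, so cv2 results get wrapped in np.asarray(), MAX_VALUES_BY_DTYPE is re-keyed by dtype name, and # type: ignore comments are added or removed to match. A minimal sketch of the wrapping pattern, mirroring the __difference fix in blend.py (the standalone script around it is illustrative, not from the commit):

import cv2
import numpy as np

def difference(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    # The newer OpenCV stubs type cv2.absdiff's result more loosely than np.ndarray;
    # np.asarray() is a no-op at runtime here but narrows the static type for pyright.
    return np.asarray(cv2.absdiff(a, b))

a = np.random.rand(4, 4).astype(np.float32)
b = np.random.rand(4, 4).astype(np.float32)
print(difference(a, b).shape)  # (4, 4)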
8 changes: 4 additions & 4 deletions backend/src/nodes/impl/blend.py
@@ -135,10 +135,10 @@ def __overlay(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
return np.where(b < 0.5, 2 * b * a, 1 - 2 * (1 - b) * (1 - a))

def __difference(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
return cv2.absdiff(a, b)
return np.asarray(cv2.absdiff(a, b))

def __negation(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
return 1 - cv2.absdiff(1 - b, a)
return 1 - cv2.absdiff(1 - b, a) # type: ignore

def __screen(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
return a + b - (a * b) # type: ignore
@@ -149,7 +149,7 @@ def __xor(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
)

def __subtract(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
return b - a # type: ignore
return b - a

def __divide(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
return b / np.maximum(0.0001, a)
@@ -222,7 +222,7 @@ def assert_sane(c: int, name: str):
o_rgb = overlay[:, :, :3]

blend_rgb = blender.apply_blend(o_rgb, base, blend_mode)
final_rgb = o_a * blend_rgb + (1 - o_a) * base
final_rgb = o_a * blend_rgb + (1 - o_a) * base # type: ignore
if needs_clipping:
final_rgb = np.clip(final_rgb, 0, 1)

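For reference, the final_rgb line above is the usual alpha-over composite, with the blend result weighted by the overlay's alpha (notation mine):

C_{out} = \alpha_o \, B(C_{overlay}, C_{base}) + (1 - \alpha_o) \, C_{base}

where B is the selected blend mode; the needs_clipping branch then clamps the result to [0, 1].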
6 changes: 3 additions & 3 deletions backend/src/nodes/impl/cas.py
@@ -36,8 +36,8 @@ def create_cas_mask(img: np.ndarray, kernel, bias: float = 2) -> np.ndarray:
l = _luminance(img)
min_l = cv2.erode(l, kernel)
max_l = cv2.dilate(l, kernel, dst=l)
min_d = np.minimum(1.0 - max_l, min_l, out=min_l)
max_l += 1e-8
min_d = np.minimum(1.0 - max_l, min_l, out=min_l) # type: ignore
max_l += 1e-8 # type: ignore
min_d /= max_l
mask = min_d
if bias != 1:
@@ -53,4 +53,4 @@ def cas_mix(
) -> np.ndarray:
mask = create_cas_mask(img, kernel, bias)
mask = np.dstack((mask,) * get_h_w_c(sharpened)[2])
return img * (1 - mask) + sharpened * mask
return img * (1 - mask) + sharpened * mask # type: ignore
6 changes: 3 additions & 3 deletions backend/src/nodes/impl/color/convert_data.py
@@ -159,9 +159,9 @@ def __cmyk_to_rgb(img: np.ndarray) -> np.ndarray:
def __rgb_to_lab(img: np.ndarray) -> np.ndarray:
# 0≤L≤100 , −127≤a≤127, −127≤b≤127
img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
l = img[:, :, 0] / 100
a = (img[:, :, 1] + 127) / 254
b = (img[:, :, 2] + 127) / 254
l = img[:, :, 0] / 100 # type: ignore
a = (img[:, :, 1] + 127) / 254 # type: ignore
b = (img[:, :, 2] + 127) / 254 # type: ignore
return cv2.merge((b, a, l))


4 changes: 2 additions & 2 deletions backend/src/nodes/impl/dithering/palette.py
@@ -7,7 +7,7 @@
def _as_float32(image: np.ndarray) -> np.ndarray:
if image.dtype == np.float32:
return image
max_value = MAX_VALUES_BY_DTYPE[image.dtype]
max_value = MAX_VALUES_BY_DTYPE[image.dtype.name]
return image.astype(np.float32) / max_value


@@ -29,7 +29,7 @@ def kmeans_palette(image: np.ndarray, num_colors: int) -> np.ndarray:
attempts = 10
cv2.setRNGSeed(0)
_, _, center = cv2.kmeans(
flat_image, num_colors, None, criteria, attempts, cv2.KMEANS_PP_CENTERS
flat_image, num_colors, None, criteria, attempts, cv2.KMEANS_PP_CENTERS # type: ignore
)

return center.reshape((1, -1, image.shape[2]))
47 changes: 26 additions & 21 deletions backend/src/nodes/impl/image_utils.py
@@ -15,16 +15,16 @@
from .color.color import Color

MAX_VALUES_BY_DTYPE = {
np.dtype("int8"): 127,
np.dtype("uint8"): 255,
np.dtype("int16"): 32767,
np.dtype("uint16"): 65535,
np.dtype("int32"): 2147483647,
np.dtype("uint32"): 4294967295,
np.dtype("int64"): 9223372036854775807,
np.dtype("uint64"): 18446744073709551615,
np.dtype("float32"): 1.0,
np.dtype("float64"): 1.0,
np.dtype("int8").name: 127,
np.dtype("uint8").name: 255,
np.dtype("int16").name: 32767,
np.dtype("uint16").name: 65535,
np.dtype("int32").name: 2147483647,
np.dtype("uint32").name: 4294967295,
np.dtype("int64").name: 9223372036854775807,
np.dtype("uint64").name: 18446744073709551615,
np.dtype("float32").name: 1.0,
np.dtype("float64").name: 1.0,
}


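MAX_VALUES_BY_DTYPE above is now keyed by dtype name strings rather than np.dtype objects, and the call sites later in this commit (palette.py, np_tensor_utils.py, pytorch/utils.py) switch from img.dtype to img.dtype.name accordingly. A small self-contained sketch of the lookup, using a trimmed copy of the table (illustrative, not the full dict):

import numpy as np

MAX_VALUES_BY_DTYPE = {
    np.dtype("uint8").name: 255,     # key is the string "uint8"
    np.dtype("uint16").name: 65535,  # "uint16"
    np.dtype("float32").name: 1.0,   # "float32"
}

def as_float32(image: np.ndarray) -> np.ndarray:
    if image.dtype == np.float32:
        return image
    max_value = MAX_VALUES_BY_DTYPE[image.dtype.name]  # name string, not the dtype object
    return image.astype(np.float32) / max_value

print(as_float32(np.array([0, 51, 255], dtype=np.uint8)))  # -> [0.  0.2 1. ]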
@@ -151,7 +151,9 @@ def shift(img: np.ndarray, amount_x: int, amount_y: int, fill: FillColor) -> np.
fill_color = fill.get_color(c)

h, w, _ = get_h_w_c(img)
translation_matrix = np.float32([[1, 0, amount_x], [0, 1, amount_y]]) # type: ignore
translation_matrix = np.asfarray(
[[1, 0, amount_x], [0, 1, amount_y]], dtype=np.float32
)
img = cv2.warpAffine(
img,
translation_matrix,
@@ -237,18 +239,18 @@ def create_border(

_, _, c = get_h_w_c(img)
if c == 4 and border_type == BorderType.BLACK:
value = (0, 0, 0, 1)
value = (0.0, 0.0, 0.0, 1.0)
else:
value = 0
value = (0.0,)

cv_border_type: int = border_type.value
if border_type == BorderType.TRANSPARENT:
cv_border_type = cv2.BORDER_CONSTANT
value = 0
value = (0.0,)
img = as_target_channels(img, 4)
elif border_type == BorderType.WHITE:
cv_border_type = cv2.BORDER_CONSTANT
value = (1,) * c
value = (1.0,) * c
elif border_type == BorderType.CUSTOM_COLOR:
assert (
color is not None
@@ -274,21 +276,24 @@ def create_border(
)


def calculate_ssim(img1: np.ndarray, img2: np.ndarray) -> float:
def calculate_ssim(
img1: np.ndarray,
img2: np.ndarray,
) -> float:
"""Calculates mean localized Structural Similarity Index (SSIM)
between two images."""

C1 = 0.01**2
C2 = 0.03**2

kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
window = np.outer(kernel, kernel.transpose()) # type: ignore

mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
mu1_sq = np.power(mu1, 2)
mu2_sq = np.power(mu2, 2)
mu1_mu2 = np.multiply(mu1, mu2)
sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
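The mu/sigma maps above feed the standard per-window SSIM of Wang et al., with C_1 = 0.01^2 and C_2 = 0.03^2 because the inputs are normalized floats (dynamic range 1):

\mathrm{SSIM}(x, y) = \frac{(2\mu_x\mu_y + C_1)(2\sigma_{xy} + C_2)}{(\mu_x^2 + \mu_y^2 + C_1)(\sigma_x^2 + \sigma_y^2 + C_2)}

The docstring describes the result as the "mean localized" SSIM over these 11x11 Gaussian windows; the edits in this hunk only swap ** and * for np.power and np.multiply and adjust # type: ignore comments.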
@@ -319,7 +324,7 @@ def cv_save_image(path: str, img: np.ndarray, params: List[int]):
except:
_, buf_img = cv2.imencode(f".{extension}", img, params)
with open(path, "wb") as outf:
outf.write(buf_img)
outf.write(buf_img) # type: ignore


def cartesian_product(arrays: List[np.ndarray]) -> np.ndarray:
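In create_border above, border fill values change from ints (0, (0, 0, 0, 1), (1,) * c) to float tuples ((0.0,), (0.0, 0.0, 0.0, 1.0), (1.0,) * c); runtime behaviour is the same, the tuple form apparently just matches the Scalar type hints in the updated OpenCV stubs. A minimal call-shape sketch (illustrative, not from the commit):

import cv2
import numpy as np

img = np.zeros((8, 8, 3), dtype=np.float32)
# value is an OpenCV Scalar; channels not given default to 0, so (0.0,) is a black border.
bordered = cv2.copyMakeBorder(img, 2, 2, 2, 2, cv2.BORDER_CONSTANT, value=(0.0,))
print(bordered.shape)  # (12, 12, 3)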
4 changes: 2 additions & 2 deletions backend/src/nodes/impl/onnx/np_tensor_utils.py
@@ -62,7 +62,7 @@ def np2nptensor(
# ] # BGR to RGB -> in numpy, if using OpenCV, else not needed. Only if image has colors.
if change_range:
dtype = img.dtype
maxval = MAX_VALUES_BY_DTYPE.get(dtype, 1.0)
maxval = MAX_VALUES_BY_DTYPE.get(dtype.name, 1.0)
t_dtype = np.dtype("float32")
img = img.astype(t_dtype) / maxval # ie: uint8 = /255
# "HWC to CHW" and "numpy to tensor"
@@ -137,7 +137,7 @@ def nptensor2np(
img_np = np_denorm(img_np) # denormalize if needed
if change_range:
img_np = np.clip(
data_range * img_np, 0, data_range
data_range * img_np, 0, data_range # type: ignore
).round() # np.clip to the data_range

# has to be in range (0,255) before changing to np.uint8, else np.float32
2 changes: 1 addition & 1 deletion backend/src/nodes/impl/pytorch/utils.py
@@ -91,7 +91,7 @@ def np2tensor(
# ] # BGR to RGB -> in numpy, if using OpenCV, else not needed. Only if image has colors.
if change_range:
dtype = img.dtype
maxval = MAX_VALUES_BY_DTYPE.get(dtype, 1.0)
maxval = MAX_VALUES_BY_DTYPE.get(dtype.name, 1.0)
t_dtype = np.dtype("float32")
img = img.astype(t_dtype) / maxval # ie: uint8 = /255
# "HWC to CHW" and "numpy to tensor"
6 changes: 4 additions & 2 deletions backend/src/nodes/impl/rembg/bg.py
@@ -64,7 +64,7 @@ def alpha_matting_cutout(
foreground = estimate_foreground_ml(img_normalized, alpha)
cutout = stack_images(foreground, alpha)

cutout = np.clip(cutout * 255, 0, 255).astype(np.uint8)
cutout = np.clip(cutout * 255, 0, 255).astype(np.uint8) # type: ignore
cutout = Image.fromarray(cutout)

return cutout
@@ -99,7 +99,9 @@ def post_process(mask: np.ndarray) -> np.ndarray:
"""
mask = morphologyEx(mask, MORPH_OPEN, kernel)
mask = GaussianBlur(mask, (5, 5), sigmaX=2, sigmaY=2, borderType=BORDER_DEFAULT)
mask = np.where(mask < 127, 0, 255).astype(np.uint8) # convert again to binary
mask = np.where(mask < 127, 0, 255).astype( # type: ignore
np.uint8
) # convert again to binary
return mask


2 changes: 1 addition & 1 deletion backend/src/nodes/properties/outputs/numpy_outputs.py
@@ -139,7 +139,7 @@ def preview_encode(
image_format = "png" if c > 3 or lossless else "jpg"

_, encoded_img = cv2.imencode(f".{image_format}", to_uint8(img, normalized=True)) # type: ignore
base64_img = base64.b64encode(encoded_img).decode("utf8")
base64_img = base64.b64encode(encoded_img).decode("utf8") # type: ignore

return f"data:image/{image_format};base64,{base64_img}", img

4 changes: 2 additions & 2 deletions backend/src/packages/chaiNNer_standard/__init__.py
@@ -11,13 +11,13 @@
Dependency(
display_name="Numpy",
pypi_name="numpy",
version="1.23.2",
version="1.24.4",
size_estimate=15 * MB,
),
Dependency(
display_name="OpenCV",
pypi_name="opencv-python",
version="4.7.0.68",
version="4.8.0.76",
size_estimate=30 * MB,
import_name="cv2",
),
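A quick way to confirm the new pins are active in a given environment (a hypothetical check, not part of the commit):

import cv2
import numpy as np

print(np.__version__)   # expected: 1.24.4
print(cv2.__version__)  # opencv-python 4.8.0.76 reports its base version, 4.8.0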
(changed file, name not shown)
@@ -54,7 +54,7 @@ def _read_cv(path: str) -> np.ndarray | None:
f'Error reading image image from path "{path}". Image may be corrupt.'
) from e

if img is None:
if img is None: # type: ignore
raise RuntimeError(
f'Error reading image image from path "{path}". Image may be corrupt.'
)
(changed file, name not shown)
@@ -105,10 +105,8 @@ def color_levels_node(
in_black_all[i], in_white_all[i], in_gamma_all[i] = 0, 1, 1
out_black_all[i], out_white_all[i] = 0, 1

img = (img - in_black_all) / (in_white_all - in_black_all) # type: ignore
img = (img - in_black_all) / (in_white_all - in_black_all)
img = np.clip(img, 0, 1)
img = (img ** (1 / in_gamma_all)) * (
out_white_all - out_black_all # type: ignore
) + out_black_all
img = (img ** (1 / in_gamma_all)) * (out_white_all - out_black_all) + out_black_all

return img
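The lines above implement the usual per-channel levels transfer; only the # type: ignore comments are removed, the math is unchanged:

out = \left(\operatorname{clip}\!\left(\frac{in - black_{in}}{white_{in} - black_{in}},\, 0,\, 1\right)\right)^{1/\gamma_{in}} \cdot (white_{out} - black_{out}) + black_{out}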
(changed file, name not shown)
@@ -95,14 +95,14 @@ def hue_and_saturation_node(

# Adjust hue
if hue != 0:
h += hue
h += hue # type: ignore
h[h >= 360] -= 360 # Wrap positive overflow
h[h < 0] += 360 # Wrap negative overflow

# Adjust saturation
if saturation != 0:
saturation = 1 + saturation
s = np.clip(s * saturation, 0, 1)
s = np.clip(s * saturation, 0, 1) # type: ignore

img = cv2.cvtColor(cv2.merge([h, l, s]), cv2.COLOR_HLS2BGR)

(changed file, name not shown)
@@ -120,7 +120,7 @@ def lens_blur(
final_2 = cv2.filter2D(inter_real, -1, component_imag_t)
final_3 = cv2.filter2D(inter_imag, -1, component_real_t)
final_4 = cv2.filter2D(inter_imag, -1, component_imag_t)
final = final_1 - final_4 + 1j * (final_2 + final_3)
final = final_1 - final_4 + 1j * (final_2 + final_3) # type: ignore
channels.append(final)
component_image = np.stack(
[weighted_sum(channel, component_params) for channel in channels]
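The combination final = final_1 - final_4 + 1j * (final_2 + final_3) above is complex multiplication of the two separable filter passes written out explicitly: with the intermediate real/imaginary images filtered by the real and imaginary kernel components, (a + bi)(c + di) = (ac - bd) + (ad + bc)i, so the real parts combine as final_1 - final_4 and the imaginary parts as final_2 + final_3. The only change in this hunk is the added # type: ignore on that line.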
(changed file, name not shown)
@@ -128,7 +128,7 @@ def average_color_fix_node(

result = input_img + diff
if alpha_diff is not None:
alpha = alpha + alpha_diff
alpha = alpha + alpha_diff # type: ignore

# add alpha back in
if alpha is not None:
(changed file, name not shown)
@@ -53,7 +53,7 @@ def convolve_node(
right=padding,
bottom=padding,
borderType=cv2.BORDER_CONSTANT,
value=0,
value=(0.0,),
)

output = cv2.filter2D(img, -1, kernel)
(changed file, name not shown)
@@ -34,14 +34,14 @@ def distance_transform_node(img: np.ndarray, spread: int) -> np.ndarray:
distanceType=cv2.DIST_L2,
maskSize=5,
dst=black_dist,
dstType=cv2.CV_32F,
dstType=cv2.CV_32F, # type: ignore
)
cv2.distanceTransform(
src=255 - img,
distanceType=cv2.DIST_L2,
maskSize=5,
dst=white_dist,
dstType=cv2.CV_32F,
dstType=cv2.CV_32F, # type: ignore
)

img1 = img.ravel()
(changed file, name not shown)
@@ -55,7 +55,7 @@ def high_pass_node(
if radius == 0 or contrast == 0:
img = img * 0 + 0.5
else:
img = contrast * (img - fast_gaussian_blur(img, radius)) + 0.5 # type: ignore
img = contrast * (img - fast_gaussian_blur(img, radius)) + 0.5

if alpha is not None:
img = np.dstack((img, alpha))
(changed file, name not shown)
@@ -50,7 +50,7 @@ def pixelate_node(
)
)

average_colors = np.mean(np.mean(blocks, axis=1), axis=2)
average_colors = np.mean(np.mean(blocks, axis=1), axis=2) # type: ignore

repeated_colors = np.repeat(
np.repeat(average_colors, block_sizes[0], axis=1), block_sizes[1], axis=0
(changed file, name not shown)
@@ -64,7 +64,7 @@ def unsharp_mask_node(
if threshold == 0:
img = cv2.addWeighted(img, amount + 1, blurred, -amount, 0)
else:
diff = img - blurred # type: ignore
diff = img - blurred
diff = np.sign(diff) * np.maximum(0, np.abs(diff) - threshold)
img = img + diff * amount

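With a nonzero threshold, the branch above applies soft-thresholded unsharp masking instead of the plain cv2.addWeighted sum:

out = img + amount \cdot \operatorname{sign}(img - blurred) \cdot \max(0,\, |img - blurred| - threshold)

so only differences whose magnitude exceeds the threshold are amplified; the edit here only drops a # type: ignore from the subtraction.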
(changed file, name not shown)
@@ -1,6 +1,5 @@
from __future__ import annotations

import math
from enum import Enum

import cv2
@@ -247,7 +246,7 @@ def blend_images_node(
# copyMakeBorder will create black border if base not converted to RGBA first
base = convert_to_BGRA(base, base_channel_count)
base = cv2.copyMakeBorder(
base, top, bottom, left, right, cv2.BORDER_CONSTANT, value=0
base, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(0.0,)
)
assert isinstance(base, np.ndarray)
else: # Make sure cached image not being worked on regardless
(changed file, name not shown)
@@ -214,7 +214,7 @@ def stack_images_node(
assert (
fixed_imgs[i].dtype == fixed_imgs[0].dtype
), "The image types are not the same and could not be auto-fixed"
return cv2.hconcat(fixed_imgs) # type: ignore
return cv2.hconcat(fixed_imgs)
elif orientation == Orientation.VERTICAL:
for i in range(len(fixed_imgs)):
assert (
@@ -223,4 +223,4 @@ def stack_images_node(
assert (
fixed_imgs[i].dtype == fixed_imgs[0].dtype
), "The image types are not the same and could not be auto-fixed"
return cv2.vconcat(fixed_imgs) # type: ignore
return cv2.vconcat(fixed_imgs)
(remaining changed files not loaded)
