Skip to content

Commit d6cc1de

Browse files
committed
fix(app): fixed InputField default values
1 parent 174ea02 commit d6cc1de

File tree

4 files changed: 6 additions (+), 16 deletions (−)

invokeai/app/invocations/create_denoise_mask.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
 from torchvision.transforms.functional import resize as tv_resize
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.constants import DEFAULT_PRECISION
 from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField
 from invokeai.app.invocations.image_to_latents import ImageToLatentsInvocation
 from invokeai.app.invocations.model import VAEField
@@ -29,11 +28,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
     image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1)
     mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2)
     tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3)
-    fp32: bool = InputField(
-        default=DEFAULT_PRECISION == torch.float32,
-        description=FieldDescriptions.fp32,
-        ui_order=4,
-    )
+    fp32: bool = InputField(default=False, description=FieldDescriptions.fp32, ui_order=4)
 
     def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor:
         if mask_image.mode != "L":

invokeai/app/invocations/create_gradient_mask.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
 from torchvision.transforms.functional import resize as tv_resize
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
-from invokeai.app.invocations.constants import DEFAULT_PRECISION
 from invokeai.app.invocations.fields import (
     DenoiseMaskField,
     FieldDescriptions,
@@ -76,11 +75,7 @@ class CreateGradientMaskInvocation(BaseInvocation):
         ui_order=7,
     )
     tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=8)
-    fp32: bool = InputField(
-        default=DEFAULT_PRECISION == torch.float32,
-        description=FieldDescriptions.fp32,
-        ui_order=9,
-    )
+    fp32: bool = InputField(default=False, description=FieldDescriptions.fp32, ui_order=9)
 
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> GradientMaskOutput:

invokeai/app/invocations/image_to_latents.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
 from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.constants import DEFAULT_PRECISION, LATENT_SCALE_FACTOR
+from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     ImageField,
@@ -49,7 +49,7 @@ class ImageToLatentsInvocation(BaseInvocation):
     # NOTE: tile_size = 0 is a special value. We use this rather than `int | None`, because the workflow UI does not
     # offer a way to directly set None values.
     tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
-    fp32: bool = InputField(default=DEFAULT_PRECISION == torch.float32, description=FieldDescriptions.fp32)
+    fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)
 
     @staticmethod
     def vae_encode(

invokeai/app/invocations/latents_to_image.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
 from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.constants import DEFAULT_PRECISION, LATENT_SCALE_FACTOR
+from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     Input,
@@ -51,7 +51,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     # NOTE: tile_size = 0 is a special value. We use this rather than `int | None`, because the workflow UI does not
     # offer a way to directly set None values.
     tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
-    fp32: bool = InputField(default=DEFAULT_PRECISION == torch.float32, description=FieldDescriptions.fp32)
+    fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)
 
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> ImageOutput:

Comments (0)