diff --git a/invokeai/app/invocations/constants.py b/invokeai/app/invocations/constants.py
index cebe0eb30fc..e01589be812 100644
--- a/invokeai/app/invocations/constants.py
+++ b/invokeai/app/invocations/constants.py
@@ -1,6 +1,7 @@
 from typing import Literal
 
 from invokeai.backend.stable_diffusion.schedulers import SCHEDULER_MAP
+from invokeai.backend.util.devices import TorchDevice
 
 LATENT_SCALE_FACTOR = 8
 """
@@ -15,3 +16,5 @@
 
 IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"]
 """A literal type for PIL image modes supported by Invoke"""
+
+DEFAULT_PRECISION = TorchDevice.choose_torch_dtype()
diff --git a/invokeai/app/invocations/create_denoise_mask.py b/invokeai/app/invocations/create_denoise_mask.py
index d128e0efec6..2d66c20dbd4 100644
--- a/invokeai/app/invocations/create_denoise_mask.py
+++ b/invokeai/app/invocations/create_denoise_mask.py
@@ -6,7 +6,7 @@
 from torchvision.transforms.functional import resize as tv_resize
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.denoise_latents import DEFAULT_PRECISION
+from invokeai.app.invocations.constants import DEFAULT_PRECISION
 from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField
 from invokeai.app.invocations.image_to_latents import ImageToLatentsInvocation
 from invokeai.app.invocations.model import VAEField
@@ -30,7 +30,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
     mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2)
     tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3)
     fp32: bool = InputField(
-        default=DEFAULT_PRECISION == "float32",
+        default=DEFAULT_PRECISION == torch.float32,
         description=FieldDescriptions.fp32,
         ui_order=4,
     )
diff --git a/invokeai/app/invocations/create_gradient_mask.py b/invokeai/app/invocations/create_gradient_mask.py
index 2d2b13fdcc2..089313463bf 100644
--- a/invokeai/app/invocations/create_gradient_mask.py
+++ b/invokeai/app/invocations/create_gradient_mask.py
@@ -7,7 +7,7 @@
 from torchvision.transforms.functional import resize as tv_resize
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
-from invokeai.app.invocations.denoise_latents import DEFAULT_PRECISION
+from invokeai.app.invocations.constants import DEFAULT_PRECISION
 from invokeai.app.invocations.fields import (
     DenoiseMaskField,
     FieldDescriptions,
@@ -74,7 +74,7 @@ class CreateGradientMaskInvocation(BaseInvocation):
     )
     tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=8)
     fp32: bool = InputField(
-        default=DEFAULT_PRECISION == "float32",
+        default=DEFAULT_PRECISION == torch.float32,
         description=FieldDescriptions.fp32,
         ui_order=9,
     )
diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py
index 3851caa6474..7260fcb1682 100644
--- a/invokeai/app/invocations/denoise_latents.py
+++ b/invokeai/app/invocations/denoise_latents.py
@@ -59,8 +59,6 @@
 from .controlnet_image_processors import ControlField
 from .model import ModelIdentifierField, UNetField
 
-DEFAULT_PRECISION = TorchDevice.choose_torch_dtype()
-
 
 def get_scheduler(
     context: InvocationContext,
diff --git a/invokeai/app/invocations/image_to_latents.py b/invokeai/app/invocations/image_to_latents.py
index bf2eb414e17..06de530154e 100644
--- a/invokeai/app/invocations/image_to_latents.py
+++ b/invokeai/app/invocations/image_to_latents.py
@@ -12,7 +12,7 @@
 from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.denoise_latents import DEFAULT_PRECISION
+from invokeai.app.invocations.constants import DEFAULT_PRECISION
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     ImageField,
@@ -44,7 +44,7 @@ class ImageToLatentsInvocation(BaseInvocation):
         input=Input.Connection,
     )
     tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
-    fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32)
+    fp32: bool = InputField(default=DEFAULT_PRECISION == torch.float32, description=FieldDescriptions.fp32)
 
     @staticmethod
     def vae_encode(vae_info: LoadedModel, upcast: bool, tiled: bool, image_tensor: torch.Tensor) -> torch.Tensor:
diff --git a/invokeai/app/invocations/latents_to_image.py b/invokeai/app/invocations/latents_to_image.py
index 648ee7ac68d..202e8bfa1bc 100644
--- a/invokeai/app/invocations/latents_to_image.py
+++ b/invokeai/app/invocations/latents_to_image.py
@@ -11,7 +11,7 @@
 from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.denoise_latents import DEFAULT_PRECISION
+from invokeai.app.invocations.constants import DEFAULT_PRECISION
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     Input,
@@ -46,7 +46,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
         input=Input.Connection,
     )
     tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
-    fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32)
+    fp32: bool = InputField(default=DEFAULT_PRECISION == torch.float32, description=FieldDescriptions.fp32)
 
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> ImageOutput:
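A note on the default being fixed here: TorchDevice.choose_torch_dtype() returns a torch.dtype object, not a string, so the old comparison DEFAULT_PRECISION == "float32" could never be true and the fp32 fields always defaulted to False. A minimal sketch of the two comparisons, using a hard-coded stand-in for the chosen dtype (only the comparison semantics are illustrated, not InvokeAI's device-selection logic):

import torch

# Stand-in for TorchDevice.choose_torch_dtype(); the real call picks a dtype
# based on the available device (hard-coded here for illustration).
default_precision = torch.float32

# A torch.dtype never compares equal to a string, so the old default was always False.
print(default_precision == "float32")      # False
# Comparing against the dtype object yields the intended default.
print(default_precision == torch.float32)  # True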