Revert "restore 3.9 compatibility by replacing | with Union[]"
This reverts commit 76bafeb.
lstein committed Jul 3, 2023
1 parent 73a2791 commit 2465c79
Showing 16 changed files with 37 additions and 43 deletions.
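The revert swaps `typing.Union[X, Y]` annotations back to PEP 604's `X | Y` spelling. The `|` form is only valid where the annotation is actually evaluated at runtime on Python 3.10+, so this commit effectively drops 3.9 support again. A minimal sketch, not from the repo, of the two spellings being swapped:

    from typing import Union

    def len_or_none_old(x: Union[str, None]) -> Union[int, None]:
        # 3.9-safe spelling: Union[...] subscription works on 3.7+
        return None if x is None else len(x)

    def len_or_none_new(x: str | None) -> int | None:
        # PEP 604 spelling: `str | None` is evaluated when the def runs,
        # so on 3.9 this line raises
        #   TypeError: unsupported operand type(s) for |: 'type' and 'NoneType'
        return None if x is None else len(x)

`from __future__ import annotations` would defer that evaluation and let such a module import on 3.9, but pydantic resolves annotations again when a model class is defined, so for the pydantic-heavy files below `Union[]` was the only 3.9-safe spelling.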
4 changes: 2 additions & 2 deletions invokeai/app/cli/commands.py
@@ -47,7 +47,7 @@ def add_parsers(
commands: list[type],
command_field: str = "type",
exclude_fields: list[str] = ["id", "type"],
-add_arguments: Union[Callable[[argparse.ArgumentParser], None],None] = None
+add_arguments: Callable[[argparse.ArgumentParser], None]|None = None
):
"""Adds parsers for each command to the subparsers"""

@@ -72,7 +72,7 @@ def add_parsers(
def add_graph_parsers(
subparsers,
graphs: list[LibraryGraph],
-add_arguments: Union[Callable[[argparse.ArgumentParser], None], None] = None
+add_arguments: Callable[[argparse.ArgumentParser], None]|None = None
):
for graph in graphs:
command_parser = subparsers.add_parser(graph.name, help=graph.description)
3 changes: 2 additions & 1 deletion invokeai/app/cli_app.py
@@ -1,6 +1,7 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

import argparse
import os
import re
import shlex
import sys
@@ -347,7 +348,7 @@ def invoke_cli():

# Parse invocation
command: CliCommand = None # type:ignore
-system_graph: Union[LibraryGraph,None] = None
+system_graph: LibraryGraph|None = None
if args['type'] in system_graph_names:
system_graph = next(filter(lambda g: g.name == args['type'], system_graphs))
invocation = GraphInvocation(graph=system_graph.graph, id=str(current_id))
2 changes: 1 addition & 1 deletion invokeai/app/services/board_images.py
@@ -132,7 +132,7 @@ def get_board_for_image(


def board_record_to_dto(
-board_record: BoardRecord, cover_image_name: Union[str, None], image_count: int
+board_record: BoardRecord, cover_image_name: str | None, image_count: int
) -> BoardDTO:
"""Converts a board record to a board DTO."""
return BoardDTO(
4 changes: 2 additions & 2 deletions invokeai/app/services/events.py
@@ -1,6 +1,6 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

-from typing import Any, Union
+from typing import Any
from invokeai.app.models.image import ProgressImage
from invokeai.app.util.misc import get_timestamp
from invokeai.app.services.model_manager_service import BaseModelType, ModelType, SubModelType, ModelInfo
@@ -28,7 +28,7 @@ def emit_generator_progress(
graph_execution_state_id: str,
node: dict,
source_node_id: str,
-progress_image: Union[ProgressImage, None],
+progress_image: ProgressImage | None,
step: int,
total_steps: int,
) -> None:
5 changes: 2 additions & 3 deletions invokeai/app/services/graph.py
@@ -3,6 +3,7 @@
import copy
import itertools
import uuid
+from types import NoneType
from typing import (
Annotated,
Any,
@@ -25,8 +26,6 @@
InvocationContext,
)

-# in 3.10 this would be "from types import NoneType"
-NoneType = type(None)

class EdgeConnection(BaseModel):
node_id: str = Field(description="The id of the node for this edge connection")
@@ -847,7 +846,7 @@ class Config:
]
}

-def next(self) -> Union[BaseInvocation, None]:
+def next(self) -> BaseInvocation | None:
"""Gets the next node ready to execute."""

# TODO: enable multiple nodes to execute simultaneously by tracking currently executing nodes
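Besides the `Union` swap, the graph.py diff trades the hand-rolled `NoneType = type(None)` shim for the real `from types import NoneType`, which was absent from Python 3 until it was reintroduced in 3.10. A version-guarded sketch of the equivalence, using only the stdlib; neither commit wrote it this way:

    import sys

    if sys.version_info >= (3, 10):
        from types import NoneType   # reintroduced in 3.10
    else:
        NoneType = type(None)        # the 3.9 shim this revert deletes

    assert isinstance(None, NoneType)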
8 changes: 4 additions & 4 deletions invokeai/app/services/image_file_storage.py
@@ -2,7 +2,7 @@
from abc import ABC, abstractmethod
from pathlib import Path
from queue import Queue
-from typing import Dict, Optional, Union
+from typing import Dict, Optional

from PIL.Image import Image as PILImageType
from PIL import Image, PngImagePlugin
@@ -80,7 +80,7 @@ class DiskImageFileStorage(ImageFileStorageBase):
__cache: Dict[Path, PILImageType]
__max_cache_size: int

-def __init__(self, output_folder: Union[str, Path]):
+def __init__(self, output_folder: str | Path):
self.__cache = dict()
self.__cache_ids = Queue()
self.__max_cache_size = 10 # TODO: get this from config
@@ -164,7 +164,7 @@ def get_path(self, image_name: str, thumbnail: bool = False) -> Path:

return path

-def validate_path(self, path: Union[str, Path]) -> bool:
+def validate_path(self, path: str | Path) -> bool:
"""Validates the path given for an image or thumbnail."""
path = path if isinstance(path, Path) else Path(path)
return path.exists()
@@ -175,7 +175,7 @@ def __validate_storage_folders(self) -> None:
for folder in folders:
folder.mkdir(parents=True, exist_ok=True)

-def __get_cache(self, image_name: Path) -> Union[PILImageType, None]:
+def __get_cache(self, image_name: Path) -> PILImageType | None:
return None if image_name not in self.__cache else self.__cache[image_name]

def __set_cache(self, image_name: Path, image: PILImageType):
2 changes: 1 addition & 1 deletion invokeai/app/services/image_record_storage.py
@@ -116,7 +116,7 @@ def save(
pass

@abstractmethod
-def get_most_recent_image_for_board(self, board_id: str) -> Union[ImageRecord, None]:
+def get_most_recent_image_for_board(self, board_id: str) -> ImageRecord | None:
"""Gets the most recent image for a board."""
pass

5 changes: 2 additions & 3 deletions invokeai/app/services/invocation_queue.py
@@ -5,7 +5,6 @@
from queue import Queue

from pydantic import BaseModel, Field
-from typing import Union


class InvocationQueueItem(BaseModel):
@@ -23,7 +22,7 @@ def get(self) -> InvocationQueueItem:
pass

@abstractmethod
-def put(self, item: Union[InvocationQueueItem, None]) -> None:
+def put(self, item: InvocationQueueItem | None) -> None:
pass

@abstractmethod
@@ -58,7 +57,7 @@ def get(self) -> InvocationQueueItem:

return item

-def put(self, item: Union[InvocationQueueItem, None]) -> None:
+def put(self, item: InvocationQueueItem | None) -> None:
self.__queue.put(item)

def cancel(self, graph_execution_state_id: str) -> None:
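Widening `put()` to accept `InvocationQueueItem | None` usually means `None` doubles as a shutdown sentinel for whatever thread is draining the queue; that reading is my assumption, not something this diff states. A self-contained sketch of the sentinel pattern:

    import threading
    from queue import Queue

    q: "Queue[str | None]" = Queue()   # quoted annotation, parses even on 3.9

    def worker() -> None:
        while True:
            item = q.get()
            if item is None:           # sentinel: producer asked us to stop
                break
            print(f"processing {item}")

    t = threading.Thread(target=worker)
    t.start()
    q.put("job-1")
    q.put(None)                        # signal shutdown
    t.join()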
5 changes: 2 additions & 3 deletions invokeai/app/services/invoker.py
@@ -2,7 +2,6 @@

from abc import ABC
from threading import Event, Thread
-from typing import Union

from ..invocations.baseinvocation import InvocationContext
from .graph import Graph, GraphExecutionState
@@ -22,7 +21,7 @@ def __init__(self, services: InvocationServices):

def invoke(
self, graph_execution_state: GraphExecutionState, invoke_all: bool = False
-) -> Union[str, None]:
+) -> str | None:
"""Determines the next node to invoke and enqueues it, preparing if needed.
Returns the id of the queued node, or `None` if there are no nodes left to enqueue."""

@@ -46,7 +45,7 @@ def invoke(

return invocation.id

-def create_execution_state(self, graph: Union[Graph, None] = None) -> GraphExecutionState:
+def create_execution_state(self, graph: Graph | None = None) -> GraphExecutionState:
"""Creates a new execution state for the given graph"""
new_state = GraphExecutionState(graph=Graph() if graph is None else graph)
self.services.graph_execution_manager.set(new_state)
10 changes: 5 additions & 5 deletions invokeai/app/services/latent_storage.py
@@ -3,7 +3,7 @@
from abc import ABC, abstractmethod
from pathlib import Path
from queue import Queue
-from typing import Dict, Union
+from typing import Dict

import torch

@@ -55,7 +55,7 @@ def delete(self, name: str) -> None:
if name in self.__cache:
del self.__cache[name]

-def __get_cache(self, name: str) -> Union[torch.Tensor, None]:
+def __get_cache(self, name: str) -> torch.Tensor|None:
return None if name not in self.__cache else self.__cache[name]

def __set_cache(self, name: str, data: torch.Tensor):
@@ -69,9 +69,9 @@ def __set_cache(self, name: str, data: torch.Tensor):
class DiskLatentsStorage(LatentsStorageBase):
"""Stores latents in a folder on disk without caching"""

-__output_folder: Union[str, Path]
+__output_folder: str | Path

-def __init__(self, output_folder: Union[str, Path]):
+def __init__(self, output_folder: str | Path):
self.__output_folder = output_folder if isinstance(output_folder, Path) else Path(output_folder)
self.__output_folder.mkdir(parents=True, exist_ok=True)

@@ -91,4 +91,4 @@ def delete(self, name: str) -> None:

def get_path(self, name: str) -> Path:
return self.__output_folder / name
-
+
6 changes: 3 additions & 3 deletions invokeai/backend/generator/base.py
@@ -21,7 +21,7 @@
from accelerate.utils import set_seed
from diffusers import DiffusionPipeline
from tqdm import trange
-from typing import Callable, List, Iterator, Optional, Type, Union
+from typing import Callable, List, Iterator, Optional, Type
from dataclasses import dataclass, field
from diffusers.schedulers import SchedulerMixin as Scheduler

@@ -178,7 +178,7 @@ def _generator_class(cls)->Type[Generator]:
# ------------------------------------
class Img2Img(InvokeAIGenerator):
def generate(self,
-init_image: Union[Image.Image, torch.FloatTensor],
+init_image: Image.Image | torch.FloatTensor,
strength: float=0.75,
**keyword_args
)->Iterator[InvokeAIGeneratorOutput]:
@@ -195,7 +195,7 @@ def _generator_class(cls):
# Takes all the arguments of Img2Img and adds the mask image and the seam/infill stuff
class Inpaint(Img2Img):
def generate(self,
-mask_image: Union[Image.Image, torch.FloatTensor],
+mask_image: Image.Image | torch.FloatTensor,
# Seam settings - when 0, doesn't fill seam
seam_size: int = 96,
seam_blur: int = 16,
4 changes: 2 additions & 2 deletions invokeai/backend/generator/inpaint.py
@@ -203,8 +203,8 @@ def get_make_image(
cfg_scale,
ddim_eta,
conditioning,
-init_image: Union[Image.Image, torch.FloatTensor],
-mask_image: Union[Image.Image, torch.FloatTensor],
+init_image: Image.Image | torch.FloatTensor,
+mask_image: Image.Image | torch.FloatTensor,
strength: float,
mask_blur_radius: int = 8,
# Seam settings - when 0, doesn't fill seam
6 changes: 1 addition & 5 deletions invokeai/backend/model_management/models/__init__.py
@@ -68,11 +68,7 @@ def get_model_config_enums():
enums = list()

for model_config in MODEL_CONFIGS:
-
-if hasattr(inspect,'get_annotations'):
-fields = inspect.get_annotations(model_config)
-else:
-fields = model_config.__annotations__
+fields = inspect.get_annotations(model_config)
try:
field = fields["model_format"]
except:
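The deleted `hasattr` fallback existed because `inspect.get_annotations()` is another 3.10 addition; the revert keeps only the 3.10 call. The two branches were also not quite equivalent: reading `cls.__annotations__` directly (the 3.9 branch) can return annotations inherited from a base class when the class defines none of its own, while `inspect.get_annotations()` reports only the class's own. A small illustration on 3.10+, with made-up classes:

    import inspect

    class Base:
        model_format: str

    class Child(Base):   # defines no annotations of its own
        pass

    print(inspect.get_annotations(Base))    # {'model_format': <class 'str'>}
    print(inspect.get_annotations(Child))   # {} (inherited annotations excluded)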
9 changes: 5 additions & 4 deletions invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -7,7 +7,7 @@
from collections.abc import Sequence
from dataclasses import dataclass, field
from typing import Any, Callable, Generic, List, Optional, Type, TypeVar, Union
-from pydantic import Field
+from pydantic import BaseModel, Field

import einops
import PIL.Image
@@ -17,11 +17,12 @@
import torch
import torchvision.transforms as T
from diffusers.models import AutoencoderKL, UNet2DConditionModel
-from diffusers.models.controlnet import ControlNetModel
+from diffusers.models.controlnet import ControlNetModel, ControlNetOutput
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import (
StableDiffusionPipeline,
)
+from diffusers.pipelines.controlnet import MultiControlNetModel

from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import (
StableDiffusionImg2ImgPipeline,
@@ -45,7 +46,7 @@
InvokeAIDiffuserComponent,
PostprocessingSettings,
)
-from .offloading import FullyLoadedModelGroup, ModelGroup
+from .offloading import FullyLoadedModelGroup, LazilyLoadedModelGroup, ModelGroup

@dataclass
class PipelineIntermediateState:
@@ -104,7 +105,7 @@ class AddsMaskGuidance:
_debug: Optional[Callable] = None

def __call__(
-self, step_output: Union[BaseOutput, SchedulerOutput], t: torch.Tensor, conditioning
+self, step_output: BaseOutput | SchedulerOutput, t: torch.Tensor, conditioning
) -> BaseOutput:
output_class = step_output.__class__ # We'll create a new one with masked data.

4 changes: 2 additions & 2 deletions invokeai/backend/stable_diffusion/offloading.py
@@ -4,7 +4,7 @@
import weakref
from abc import ABCMeta, abstractmethod
from collections.abc import MutableMapping
-from typing import Callable, Union
+from typing import Callable

import torch
from accelerate.utils import send_to_device
@@ -117,7 +117,7 @@ class LazilyLoadedModelGroup(ModelGroup):
"""

_hooks: MutableMapping[torch.nn.Module, RemovableHandle]
-_current_model_ref: Callable[[], Union[torch.nn.Module, _NoModel]]
+_current_model_ref: Callable[[], torch.nn.Module | _NoModel]

def __init__(self, execution_device: torch.device):
super().__init__(execution_device)
3 changes: 1 addition & 2 deletions invokeai/backend/util/devices.py
@@ -4,7 +4,6 @@

import torch
from torch import autocast
-from typing import Union
from invokeai.app.services.config import InvokeAIAppConfig

CPU_DEVICE = torch.device("cpu")
@@ -50,7 +49,7 @@ def choose_autocast(precision):
return nullcontext


-def normalize_device(device: Union[str, torch.device]) -> torch.device:
+def normalize_device(device: str | torch.device) -> torch.device:
"""Ensure device has a device index defined, if appropriate."""
device = torch.device(device)
if device.index is None:

