
Commit c57a7af

Merge branch 'main' into stalker7779/modular_seamless

2 parents adf1a97 + daa5a88

134 files changed, +3924 -2610 lines

docker/Dockerfile

Lines changed: 1 addition & 0 deletions
@@ -55,6 +55,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
 FROM node:20-slim AS web-builder
 ENV PNPM_HOME="/pnpm"
 ENV PATH="$PNPM_HOME:$PATH"
+RUN corepack use [email protected]
 RUN corepack enable

 WORKDIR /build

invokeai/app/api/routers/model_manager.py

Lines changed: 14 additions & 15 deletions
@@ -6,7 +6,7 @@
 import traceback
 from copy import deepcopy
 from tempfile import TemporaryDirectory
-from typing import Any, Dict, List, Optional, Type
+from typing import List, Optional, Type

 from fastapi import Body, Path, Query, Response, UploadFile
 from fastapi.responses import FileResponse, HTMLResponse
@@ -430,13 +430,11 @@ async def delete_model_image(
 async def install_model(
     source: str = Query(description="Model source to install, can be a local path, repo_id, or remote URL"),
     inplace: Optional[bool] = Query(description="Whether or not to install a local model in place", default=False),
-    # TODO(MM2): Can we type this?
-    config: Optional[Dict[str, Any]] = Body(
-        description="Dict of fields that override auto-probed values in the model config record, such as name, description and prediction_type ",
-        default=None,
+    access_token: Optional[str] = Query(description="access token for the remote resource", default=None),
+    config: ModelRecordChanges = Body(
+        description="Object containing fields that override auto-probed values in the model config record, such as name, description and prediction_type ",
         example={"name": "string", "description": "string"},
     ),
-    access_token: Optional[str] = None,
 ) -> ModelInstallJob:
     """Install a model using a string identifier.

@@ -451,8 +449,9 @@ async def install_model(
     - model/name:fp16:path/to/model.safetensors
     - model/name::path/to/model.safetensors

-    `config` is an optional dict containing model configuration values that will override
-    the ones that are probed automatically.
+    `config` is a ModelRecordChanges object. Fields in this object will override
+    the ones that are probed automatically. Pass an empty object to accept
+    all the defaults.

     `access_token` is an optional access token for use with Urls that require
     authentication.
@@ -737,7 +736,7 @@ async def convert_model(
     # write the converted file to the convert path
     raw_model = converted_model.model
     assert hasattr(raw_model, "save_pretrained")
-    raw_model.save_pretrained(convert_path)
+    raw_model.save_pretrained(convert_path)  # type: ignore
     assert convert_path.exists()

     # temporarily rename the original safetensors file so that there is no naming conflict
@@ -750,12 +749,12 @@ async def convert_model(
     try:
         new_key = installer.install_path(
             convert_path,
-            config={
-                "name": original_name,
-                "description": model_config.description,
-                "hash": model_config.hash,
-                "source": model_config.source,
-            },
+            config=ModelRecordChanges(
+                name=original_name,
+                description=model_config.description,
+                hash=model_config.hash,
+                source=model_config.source,
+            ),
         )
     except Exception as e:
         logger.error(str(e))

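With this change, the install route takes `access_token` as a query parameter and parses the request body into a typed `ModelRecordChanges` instead of a free-form dict. Below is a minimal client-side sketch; the `/api/v2/models/install` path, the default port, and the example field values are assumptions for illustration and are not taken from this diff.

# Hypothetical client call against the updated install route. The URL prefix and
# port are assumptions; the body is now validated as a ModelRecordChanges object,
# so an empty JSON object ({}) accepts all auto-probed defaults.
import requests

response = requests.post(
    "http://localhost:9090/api/v2/models/install",  # assumed route and port
    params={
        "source": "some-author/some-model",  # placeholder repo_id
        "inplace": False,
        # access_token is now a query parameter rather than a bare function argument:
        # "access_token": "...",
    },
    json={"name": "my-model", "description": "installed via the REST API"},
    timeout=60,
)
response.raise_for_status()
job = response.json()  # serialized ModelInstallJob
print(job["id"], job["status"])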
invokeai/app/invocations/spandrel_image_to_image.py

Lines changed: 42 additions & 11 deletions
@@ -23,7 +23,7 @@
 from invokeai.backend.tiles.utils import TBLR, Tile


-@invocation("spandrel_image_to_image", title="Image-to-Image", tags=["upscale"], category="upscale", version="1.2.0")
+@invocation("spandrel_image_to_image", title="Image-to-Image", tags=["upscale"], category="upscale", version="1.3.0")
 class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel)."""

@@ -36,16 +36,6 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     tile_size: int = InputField(
         default=512, description="The tile size for tiled image-to-image. Set to 0 to disable tiling."
     )
-    scale: float = InputField(
-        default=4.0,
-        gt=0.0,
-        le=16.0,
-        description="The final scale of the output image. If the model does not upscale the image, this will be ignored.",
-    )
-    fit_to_multiple_of_8: bool = InputField(
-        default=False,
-        description="If true, the output image will be resized to the nearest multiple of 8 in both dimensions.",
-    )

     @classmethod
     def scale_tile(cls, tile: Tile, scale: int) -> Tile:
@@ -152,6 +142,47 @@ def upscale_image(

         return pil_image

+    @torch.inference_mode()
+    def invoke(self, context: InvocationContext) -> ImageOutput:
+        # Images are converted to RGB, because most models don't support an alpha channel. In the future, we may want to
+        # revisit this.
+        image = context.images.get_pil(self.image.image_name, mode="RGB")
+
+        # Load the model.
+        spandrel_model_info = context.models.load(self.image_to_image_model)
+
+        # Do the upscaling.
+        with spandrel_model_info as spandrel_model:
+            assert isinstance(spandrel_model, SpandrelImageToImageModel)
+
+            # Upscale the image
+            pil_image = self.upscale_image(image, self.tile_size, spandrel_model, context.util.is_canceled)
+
+        image_dto = context.images.save(image=pil_image)
+        return ImageOutput.build(image_dto)
+
+
+@invocation(
+    "spandrel_image_to_image_autoscale",
+    title="Image-to-Image (Autoscale)",
+    tags=["upscale"],
+    category="upscale",
+    version="1.0.0",
+)
+class SpandrelImageToImageAutoscaleInvocation(SpandrelImageToImageInvocation):
+    """Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel) until the target scale is reached."""
+
+    scale: float = InputField(
+        default=4.0,
+        gt=0.0,
+        le=16.0,
+        description="The final scale of the output image. If the model does not upscale the image, this will be ignored.",
+    )
+    fit_to_multiple_of_8: bool = InputField(
+        default=False,
+        description="If true, the output image will be resized to the nearest multiple of 8 in both dimensions.",
+    )
+
     @torch.inference_mode()
     def invoke(self, context: InvocationContext) -> ImageOutput:
         # Images are converted to RGB, because most models don't support an alpha channel. In the future, we may want to

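The `scale` and `fit_to_multiple_of_8` fields move from the base invocation to the new autoscale variant, so the base node (version bumped to 1.3.0) now runs a single pass at the model's native scale. As a small illustration of what the `fit_to_multiple_of_8` help text describes, here is a sketch of rounding output dimensions to the nearest multiple of 8; this is illustrative only and not the invocation's actual resizing code.

# Illustrative sketch of "fit to multiple of 8" as described by the field's help text;
# the real resizing happens inside the invocation and is not shown in this hunk.
def fit_dims_to_multiple_of_8(width: int, height: int) -> tuple[int, int]:
    def round8(x: int) -> int:
        # Round to the nearest multiple of 8, never below 8.
        return max(8, int(round(x / 8)) * 8)

    return round8(width), round8(height)

assert fit_dims_to_multiple_of_8(1021, 1998) == (1024, 2000)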
invokeai/app/services/model_install/model_install_base.py

Lines changed: 9 additions & 9 deletions
@@ -3,7 +3,7 @@

 from abc import ABC, abstractmethod
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Union
+from typing import List, Optional, Union

 from pydantic.networks import AnyHttpUrl

@@ -12,7 +12,7 @@
 from invokeai.app.services.events.events_base import EventServiceBase
 from invokeai.app.services.invoker import Invoker
 from invokeai.app.services.model_install.model_install_common import ModelInstallJob, ModelSource
-from invokeai.app.services.model_records import ModelRecordServiceBase
+from invokeai.app.services.model_records import ModelRecordChanges, ModelRecordServiceBase
 from invokeai.backend.model_manager import AnyModelConfig


@@ -64,15 +64,15 @@ def event_bus(self) -> Optional["EventServiceBase"]:
     def register_path(
         self,
         model_path: Union[Path, str],
-        config: Optional[Dict[str, Any]] = None,
+        config: Optional[ModelRecordChanges] = None,
     ) -> str:
         """
         Probe and register the model at model_path.

         This keeps the model in its current location.

         :param model_path: Filesystem Path to the model.
-        :param config: Dict of attributes that will override autoassigned values.
+        :param config: ModelRecordChanges object that will override autoassigned model record values.
         :returns id: The string ID of the registered model.
         """

@@ -92,7 +92,7 @@ def unconditionally_delete(self, key: str) -> None:
     def install_path(
         self,
         model_path: Union[Path, str],
-        config: Optional[Dict[str, Any]] = None,
+        config: Optional[ModelRecordChanges] = None,
     ) -> str:
         """
         Probe, register and install the model in the models directory.
@@ -101,22 +101,22 @@ def install_path(
         the models directory handled by InvokeAI.

         :param model_path: Filesystem Path to the model.
-        :param config: Dict of attributes that will override autoassigned values.
+        :param config: ModelRecordChanges object that will override autoassigned model record values.
         :returns id: The string ID of the registered model.
         """

     @abstractmethod
     def heuristic_import(
         self,
         source: str,
-        config: Optional[Dict[str, Any]] = None,
+        config: Optional[ModelRecordChanges] = None,
         access_token: Optional[str] = None,
         inplace: Optional[bool] = False,
     ) -> ModelInstallJob:
         r"""Install the indicated model using heuristics to interpret user intentions.

         :param source: String source
-        :param config: Optional dict. Any fields in this dict
+        :param config: Optional ModelRecordChanges object. Any fields in this object
         will override corresponding autoassigned probe fields in the
         model's config record as described in `import_model()`.
         :param access_token: Optional access token for remote sources.
@@ -147,7 +147,7 @@ def heuristic_import(
     def import_model(
         self,
         source: ModelSource,
-        config: Optional[Dict[str, Any]] = None,
+        config: Optional[ModelRecordChanges] = None,
     ) -> ModelInstallJob:
         """Install the indicated model.


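With the abstract installer methods now typed against `ModelRecordChanges`, callers construct a changes object rather than passing a dict. The following is a sketch of what a call site might look like, assuming an already-constructed installer service; the source URL and field values are placeholders.

# Hypothetical call site: `installer` stands in for a ModelInstallServiceBase
# implementation obtained from the running application; it is not constructed here.
from invokeai.app.services.model_install.model_install_base import ModelInstallServiceBase
from invokeai.app.services.model_records import ModelRecordChanges

def queue_install(installer: ModelInstallServiceBase) -> None:
    job = installer.heuristic_import(
        source="https://example.com/models/some-model.safetensors",  # placeholder URL
        # Previously a plain dict such as {"name": ..., "description": ...}:
        config=ModelRecordChanges(name="some-model", description="override of the probed description"),
        access_token=None,
        inplace=False,
    )
    print(f"install job {job.id} queued with status {job.status}")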
invokeai/app/services/model_install/model_install_common.py

Lines changed: 5 additions & 3 deletions
@@ -2,13 +2,14 @@
 import traceback
 from enum import Enum
 from pathlib import Path
-from typing import Any, Dict, Literal, Optional, Set, Union
+from typing import Literal, Optional, Set, Union

 from pydantic import BaseModel, Field, PrivateAttr, field_validator
 from pydantic.networks import AnyHttpUrl
 from typing_extensions import Annotated

 from invokeai.app.services.download import DownloadJob, MultiFileDownloadJob
+from invokeai.app.services.model_records import ModelRecordChanges
 from invokeai.backend.model_manager import AnyModelConfig, ModelRepoVariant
 from invokeai.backend.model_manager.config import ModelSourceType
 from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
@@ -133,8 +134,9 @@ class ModelInstallJob(BaseModel):
     id: int = Field(description="Unique ID for this job")
     status: InstallStatus = Field(default=InstallStatus.WAITING, description="Current status of install process")
     error_reason: Optional[str] = Field(default=None, description="Information about why the job failed")
-    config_in: Dict[str, Any] = Field(
-        default_factory=dict, description="Configuration information (e.g. 'description') to apply to model."
+    config_in: ModelRecordChanges = Field(
+        default_factory=ModelRecordChanges,
+        description="Configuration information (e.g. 'description') to apply to model.",
     )
     config_out: Optional[AnyModelConfig] = Field(
         default=None, description="After successful installation, this will hold the configuration object."

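Because `config_in` is now a `ModelRecordChanges` model rather than a bare dict, downstream code reads attributes instead of keys. A short sketch of the difference, assuming `ModelRecordChanges` is a standard pydantic v2 model (as its use with `Body()` and `default_factory` suggests); only fields shown elsewhere in this commit are used.

# ModelRecordChanges overrides are attributes, not dict keys.
from invokeai.app.services.model_records import ModelRecordChanges

changes = ModelRecordChanges(description="user-supplied description")
print(changes.description)                     # attribute access replaces changes["description"]
print(changes.model_dump(exclude_unset=True))  # a plain dict is still available when one is needed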