Commit

Merge branch 'main' into ryan/bump-diffusers

maryhipp authored Oct 28, 2024
2 parents ae327c7 + a84aa5c commit 3f15b75
Showing 100 changed files with 2,797 additions and 793 deletions.
55 changes: 29 additions & 26 deletions docs/contributing/dev-environment.md
@@ -17,46 +17,49 @@ If you just want to use Invoke, you should use the [installer][installer link].
## Setup

1. Run through the [requirements][requirements link].
2. [Fork and clone][forking link] the [InvokeAI repo][repo link].
3. Create a directory for user data (images, models, db, etc). This is typically at `~/invokeai`, but if you already have a non-dev install, you may want to create a separate directory for the dev install.
4. Create a python virtual environment inside the directory you just created:

    ```sh
    python3 -m venv .venv --prompt InvokeAI-Dev
    ```

5. Activate the venv (you'll need to do this every time you want to run the app):

    ```sh
    source .venv/bin/activate
    ```

6. Install the repo as an [editable install][editable install link]:

    ```sh
    pip install -e ".[dev,test,xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
    ```

    Refer to the [manual installation][manual install link] instructions to determine the correct install options. `xformers` is optional, but `dev` and `test` are not.

7. Install the frontend dev toolchain:
    - [`nodejs`](https://nodejs.org/) (recommend v20 LTS)
    - [`pnpm`](https://pnpm.io/8.x/installation) (must be v8 - not v9!)
8. Do a production build of the frontend:

    ```sh
    cd PATH_TO_INVOKEAI_REPO/invokeai/frontend/web
    pnpm i
    pnpm build
    ```

9. Start the application:

    ```sh
    cd PATH_TO_INVOKEAI_REPO
    python scripts/invokeai-web.py
    ```

10. Access the UI at `localhost:9090`.

## Updating the UI
6 changes: 5 additions & 1 deletion invokeai/app/api/routers/model_manager.py
@@ -808,7 +808,11 @@ def get_is_installed(
    for model in installed_models:
        if model.source == starter_model.source:
            return True
        if (
            (model.name == starter_model.name or model.name in starter_model.previous_names)
            and model.base == starter_model.base
            and model.type == starter_model.type
        ):
            return True
    return False
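
To see what the widened check does in practice, here is a minimal, runnable sketch of the same matching rule with hypothetical stand-in records (`previous_names` is assumed to be a plain list of strings; the real model-manager types carry more fields):

```python
from dataclasses import dataclass, field


@dataclass
class ModelRecord:
    # Hypothetical stand-in for the installed/starter model records.
    name: str
    base: str
    type: str
    source: str
    previous_names: list[str] = field(default_factory=list)


def is_installed(starter: ModelRecord, installed: list[ModelRecord]) -> bool:
    for model in installed:
        # An exact source match always counts as installed.
        if model.source == starter.source:
            return True
        # Otherwise match on name (current or any previous name) plus base and type,
        # so a renamed starter model is still recognized as installed.
        if (
            (model.name == starter.name or model.name in starter.previous_names)
            and model.base == starter.base
            and model.type == starter.type
        ):
            return True
    return False


# Example: a starter model that was renamed from "sdxl-canny" to "canny-sdxl".
starter = ModelRecord("canny-sdxl", "sdxl", "controlnet", "org/canny-sdxl", ["sdxl-canny"])
installed = [ModelRecord("sdxl-canny", "sdxl", "controlnet", "local/sdxl-canny")]
assert is_installed(starter, installed)
```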

4 changes: 4 additions & 0 deletions invokeai/app/invocations/mask.py
@@ -165,6 +165,7 @@ class ApplyMaskTensorToImageInvocation(BaseInvocation, WithMetadata, WithBoard):

    mask: TensorField = InputField(description="The mask tensor to apply.")
    image: ImageField = InputField(description="The image to apply the mask to.")
    invert: bool = InputField(default=False, description="Whether to invert the mask.")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.images.get_pil(self.image.image_name, mode="RGBA")
@@ -179,6 +180,9 @@ def invoke(self, context: InvocationContext) -> ImageOutput:
        mask = mask > 0.5
        mask_np = (mask.float() * 255).byte().cpu().numpy().astype(np.uint8)

        if self.invert:
            mask_np = 255 - mask_np

        # Apply the mask only to the alpha channel where the original alpha is non-zero. This preserves the original
        # image's transparency - else the transparent regions would end up as opaque black.
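
The `invert` flag flips the 8-bit mask before it touches the alpha channel. A small NumPy/Pillow sketch of that step follows; the final alpha combination shown here (a `minimum`) is an assumption, since the rest of the invocation is outside this hunk:

```python
import numpy as np
from PIL import Image

# A toy 2x2 boolean mask, already thresholded at 0.5 like the tensor above.
mask = np.array([[True, False], [False, True]])

# Same conversion as in the invocation: bool -> {0, 255} uint8.
mask_np = (mask.astype(np.float32) * 255).astype(np.uint8)

invert = True
if invert:
    # 255 - x swaps masked and unmasked regions.
    mask_np = 255 - mask_np

# Combine with the alpha channel of an RGBA image so that pixels which were
# already fully transparent stay transparent (assumed combination step).
image = Image.new("RGBA", (2, 2), (255, 0, 0, 255))
image_np = np.array(image)
image_np[..., 3] = np.minimum(image_np[..., 3], mask_np)
print(list(Image.fromarray(image_np, mode="RGBA").getdata(band=3)))
```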

19 changes: 12 additions & 7 deletions invokeai/app/services/shared/invocation_context.py
@@ -1,3 +1,4 @@
from copy import deepcopy
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Callable, Optional, Union
@@ -221,7 +222,7 @@ def save(
)

    def get_pil(self, image_name: str, mode: IMAGE_MODES | None = None) -> Image:
        """Gets an image as a PIL Image object. This method returns a copy of the image.

        Args:
            image_name: The name of the image to get.
@@ -233,11 +234,15 @@ def get_pil(self, image_name: str, mode: IMAGE_MODES | None = None) -> Image:
        image = self._services.images.get_pil_image(image_name)
        if mode and mode != image.mode:
            try:
                # convert makes a copy!
                image = image.convert(mode)
            except ValueError:
                self._services.logger.warning(
                    f"Could not convert image from {image.mode} to {mode}. Using original mode instead."
                )
        else:
            # copy the image to prevent the user from modifying the original
            image = image.copy()
        return image

    def get_metadata(self, image_name: str) -> Optional[MetadataField]:
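
The point of the copy is that callers of `get_pil` can mutate the returned image without corrupting whatever the image service holds. A standalone Pillow sketch of the two branches (`convert()` already yields a new image; `copy()` covers the no-conversion path):

```python
from PIL import Image

original = Image.new("RGB", (4, 4), (10, 20, 30))

# Branch 1: a mode conversion is requested -- convert() returns a new image.
converted = original.convert("RGBA")
assert converted is not original

# Branch 2: no conversion is needed -- copy() so the caller can't mutate the original.
same_mode = original.copy()
same_mode.putpixel((0, 0), (255, 255, 255))
assert original.getpixel((0, 0)) == (10, 20, 30)
```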
@@ -290,15 +295,15 @@ def save(self, tensor: Tensor) -> str:
        return name

    def load(self, name: str) -> Tensor:
        """Loads a tensor by name. This method returns a copy of the tensor.

        Args:
            name: The name of the tensor to load.

        Returns:
            The tensor.
        """
        return self._services.tensors.load(name).clone()


class ConditioningInterface(InvocationContextInterface):
@@ -316,16 +321,16 @@ def save(self, conditioning_data: ConditioningFieldData) -> str:
        return name

    def load(self, name: str) -> ConditioningFieldData:
        """Loads conditioning data by name. This method returns a copy of the conditioning data.

        Args:
            name: The name of the conditioning data to load.

        Returns:
            The conditioning data.
        """

        return deepcopy(self._services.conditioning.load(name))


class ModelsInterface(InvocationContextInterface):
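
The tensor and conditioning loaders follow the same copy-on-load pattern: `Tensor.clone()` and `copy.deepcopy()` hand back independent objects, so in-place edits inside a node cannot leak into stored data. A minimal sketch with hypothetical dicts standing in for the storage services:

```python
from copy import deepcopy

import torch

# Hypothetical in-memory stores standing in for the tensor/conditioning services.
_tensor_store = {"latents": torch.zeros(2, 2)}
_conditioning_store = {"prompt": {"embeds": [1.0, 2.0, 3.0]}}


def load_tensor(name: str) -> torch.Tensor:
    # clone() returns a new tensor with its own storage.
    return _tensor_store[name].clone()


def load_conditioning(name: str) -> dict:
    # deepcopy() recursively copies nested containers.
    return deepcopy(_conditioning_store[name])


t = load_tensor("latents")
t += 1  # in-place edit on the copy
assert _tensor_store["latents"].sum().item() == 0  # stored tensor untouched

c = load_conditioning("prompt")
c["embeds"].append(4.0)
assert _conditioning_store["prompt"]["embeds"] == [1.0, 2.0, 3.0]
```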
3 changes: 2 additions & 1 deletion invokeai/backend/model_manager/probe.py
@@ -462,8 +462,9 @@ def _scan_model(cls, model_name: str, checkpoint: Path) -> None:
"normal": "normalbae_image_processor",
"sketch": "pidi_image_processor",
"scribble": "lineart_image_processor",
"lineart": "lineart_image_processor",
"lineart anime": "lineart_anime_image_processor",
"lineart_anime": "lineart_anime_image_processor",
"lineart": "lineart_image_processor",
"softedge": "hed_image_processor",
"hed": "hed_image_processor",
"shuffle": "content_shuffle_image_processor",

0 comments on commit 3f15b75
