revert: inference_mode to no_grad
blessedcoolant committed Jul 5, 2023
commit 639d88a · 1 parent f155887
Showing 3 changed files with 7 additions and 7 deletions.
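
Background: torch.no_grad() and torch.inference_mode() both disable gradient tracking, but inference mode is stricter. Tensors created under it are "inference tensors" that can never re-enter autograd, which can surface later as runtime errors in code that reuses them outside the guarded region. The commit message does not state a motivation, so treat that as a plausible reading rather than the author's stated reason. A minimal sketch of the behavioral difference:

import torch

x = torch.ones(3)

with torch.no_grad():
    a = x * 2  # ordinary tensor with requires_grad=False

with torch.inference_mode():
    b = x * 2  # "inference tensor", permanently barred from autograd

a.requires_grad_(True)      # fine: a no_grad output can re-enter autograd
try:
    b.requires_grad_(True)  # RuntimeError: inference tensors can't require grad
except RuntimeError as err:
    print(err)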
invokeai/app/invocations/compel.py (1 addition, 1 deletion)

@@ -56,7 +56,7 @@ class Config(InvocationConfig):
             },
         }
 
-    @torch.inference_mode()
+    @torch.no_grad()
     def invoke(self, context: InvocationContext) -> CompelOutput:
         tokenizer_info = context.services.model_manager.get_model(
             **self.clip.tokenizer.dict(),
invokeai/app/invocations/latent.py (4 additions, 4 deletions)

@@ -285,7 +285,7 @@ def prep_control_data(
         # MultiControlNetModel has been refactored out, just need list[ControlNetData]
         return control_data
 
-    @torch.inference_mode()
+    @torch.no_grad()
     def invoke(self, context: InvocationContext) -> LatentsOutput:
         noise = context.services.latents.get(self.noise.latents_name)
 
@@ -369,7 +369,7 @@ class Config(InvocationConfig):
             },
         }
 
-    @torch.inference_mode()
+    @torch.no_grad()
     def invoke(self, context: InvocationContext) -> LatentsOutput:
         noise = context.services.latents.get(self.noise.latents_name)
         latent = context.services.latents.get(self.latents.latents_name)
@@ -461,7 +461,7 @@ class Config(InvocationConfig):
             },
         }
 
-    @torch.inference_mode()
+    @torch.no_grad()
     def invoke(self, context: InvocationContext) -> ImageOutput:
         latents = context.services.latents.get(self.latents.latents_name)
 
@@ -599,7 +599,7 @@ class Config(InvocationConfig):
             },
         }
 
-    @torch.inference_mode()
+    @torch.no_grad()
     def invoke(self, context: InvocationContext) -> LatentsOutput:
         # image = context.services.images.get(
         #     self.image.image_type, self.image.image_name
invokeai/backend/model_management/lora.py (2 additions, 2 deletions)

@@ -526,7 +526,7 @@ def apply_lora(
     ):
         original_weights = dict()
         try:
-            with torch.inference_mode():
+            with torch.no_grad():
                 for lora, lora_weight in loras:
                     #assert lora.device.type == "cpu"
                     for layer_key, layer in lora.layers.items():
@@ -552,7 +552,7 @@ def apply_lora(
             yield # wait for context manager exit
 
         finally:
-            with torch.inference_mode():
+            with torch.no_grad():
                 for module_key, weight in original_weights.items():
                     model.get_submodule(module_key).weight.copy_(weight)
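
The lora.py hunks guard a patch-and-restore of model weights. Under torch.no_grad(), in-place edits to leaf parameters (including the weight.copy_() that restores the originals) are permitted, and the saved copies remain ordinary tensors that stay usable afterwards; under torch.inference_mode(), the copies would be inference tensors, subject to the later-use restriction described above. A minimal standalone sketch of the same pattern, using illustrative names (patch_weight, delta) that are not part of InvokeAI:

from contextlib import contextmanager

import torch

@contextmanager
def patch_weight(module: torch.nn.Module, delta: torch.Tensor):
    # Save the original weight, apply the delta in place, restore on exit.
    # no_grad() allows in-place edits to a leaf parameter, and the saved
    # copy is an ordinary tensor rather than an inference tensor.
    with torch.no_grad():
        original = module.weight.detach().clone()
        module.weight += delta
    try:
        yield
    finally:
        with torch.no_grad():
            module.weight.copy_(original)

linear = torch.nn.Linear(4, 4)
with patch_weight(linear, torch.full((4, 4), 0.1)):
    y = linear(torch.randn(1, 4))  # forward pass sees the patched weight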
