diff --git a/invokeai/backend/lora/layers/lora_layer.py b/invokeai/backend/lora/layers/lora_layer.py
index 8e27836b76c..95270e359c5 100644
--- a/invokeai/backend/lora/layers/lora_layer.py
+++ b/invokeai/backend/lora/layers/lora_layer.py
@@ -19,7 +19,6 @@ def __init__(
         self.up = up
         self.mid = mid
         self.down = down
-        self.bias = bias
 
     @classmethod
     def from_state_dict_values(
diff --git a/tests/backend/lora/sidecar_layers/concatenated_lora/test_concatenated_lora_linear_sidecar_layer.py b/tests/backend/lora/sidecar_layers/concatenated_lora/test_concatenated_lora_linear_sidecar_layer.py
index 8fdc62e9ea9..f773c394d35 100644
--- a/tests/backend/lora/sidecar_layers/concatenated_lora/test_concatenated_lora_linear_sidecar_layer.py
+++ b/tests/backend/lora/sidecar_layers/concatenated_lora/test_concatenated_lora_linear_sidecar_layer.py
@@ -26,7 +26,8 @@ def test_concatenated_lora_linear_sidecar_layer():
     for out_features in sub_layer_out_features:
         down = torch.randn(rank, in_features)
         up = torch.randn(out_features, rank)
-        sub_layers.append(LoRALayer(up=up, mid=None, down=down, alpha=1.0, bias=None))
+        bias = torch.randn(out_features)
+        sub_layers.append(LoRALayer(up=up, mid=None, down=down, alpha=1.0, bias=bias))
     concatenated_lora_layer = ConcatenatedLoRALayer(sub_layers, concat_axis=0)
 
     # Patch the ConcatenatedLoRA layer into the linear layer.
@@ -34,6 +35,7 @@ def test_concatenated_lora_linear_sidecar_layer():
     linear_patched.weight.data += (
         concatenated_lora_layer.get_weight(linear_patched.weight) * concatenated_lora_layer.scale()
     )
+    linear_patched.bias.data += concatenated_lora_layer.get_bias(linear_patched.bias) * concatenated_lora_layer.scale()
 
     # Create a ConcatenatedLoRALinearSidecarLayer.
     concatenated_lora_linear_sidecar_layer = ConcatenatedLoRALinearSidecarLayer(concatenated_lora_layer, weight=1.0)
diff --git a/tests/backend/lora/sidecar_layers/lora/test_lora_linear_sidecar_layer.py b/tests/backend/lora/sidecar_layers/lora/test_lora_linear_sidecar_layer.py
index 87c2720cd4b..6e56ce6d41c 100644
--- a/tests/backend/lora/sidecar_layers/lora/test_lora_linear_sidecar_layer.py
+++ b/tests/backend/lora/sidecar_layers/lora/test_lora_linear_sidecar_layer.py
@@ -20,12 +20,13 @@ def test_lora_linear_sidecar_layer():
     rank = 4
     down = torch.randn(rank, in_features)
     up = torch.randn(out_features, rank)
-    lora_layer = LoRALayer(up=up, mid=None, down=down, alpha=1.0, bias=None)
+    bias = torch.randn(out_features)
+    lora_layer = LoRALayer(up=up, mid=None, down=down, alpha=1.0, bias=bias)
 
     # Patch the LoRA layer into the linear layer.
     linear_patched = copy.deepcopy(linear)
     linear_patched.weight.data += lora_layer.get_weight(linear_patched.weight) * lora_layer.scale()
-
+    linear_patched.bias.data += lora_layer.get_bias(linear_patched.bias) * lora_layer.scale()
     # Create a LoRALinearSidecarLayer.
     lora_linear_sidecar_layer = LoRALinearSidecarLayer(lora_layer, weight=1.0)
     linear_with_sidecar = LoRASidecarModule(linear, [lora_linear_sidecar_layer])
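
For reference, the behavior these test changes exercise can be reduced to the sketch below: a LoRA layer contributes `up @ down` to the base weight and, with this change, its `bias` tensor to the base bias, both multiplied by the layer's scale, and the "patched" and "sidecar" code paths must agree. This is a minimal standalone sketch in plain torch, not the InvokeAI `LoRALayer` / sidecar API; the `alpha / rank` scale is an assumption based on the common LoRA convention and the `alpha=1.0`, `rank=4` values used in the tests.

```python
import copy

import torch

torch.manual_seed(0)

in_features, out_features, rank, alpha = 8, 16, 4, 1.0
base = torch.nn.Linear(in_features, out_features)

down = torch.randn(rank, in_features)
up = torch.randn(out_features, rank)
bias = torch.randn(out_features)
scale = alpha / rank  # assumed scaling convention

# "Patched" path: bake the LoRA delta (weight and bias) into a copy of the base layer.
patched = copy.deepcopy(base)
patched.weight.data += (up @ down) * scale
patched.bias.data += bias * scale

# "Sidecar" path: run the base layer and add the LoRA contribution at runtime.
x = torch.randn(2, in_features)
sidecar_out = base(x) + (x @ down.t() @ up.t() + bias) * scale

# Both paths should produce the same output, which is what the updated tests
# assert for the real LoRALayer / ConcatenatedLoRALayer sidecar layers.
torch.testing.assert_close(patched(x), sidecar_out)
```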