NotImplementedError Traceback (most recent call last)
Cell In[7], line 48
46 # Load and fuse Lora
47 adapter_id = "alimama-creative/FLUX.1-Turbo-Alpha"
---> 48 pipe.load_lora_weights(adapter_id)
49 pipe.fuse_lora()
File ~/miniconda3/envs/env-p11/lib/python3.11/site-packages/diffusers/loaders/lora_pipeline.py:1856, in FluxLoraLoaderMixin.load_lora_weights(self, pretrained_model_name_or_path_or_dict, adapter_name, **kwargs)
1849 transformer_norm_state_dict = {
1850 k: state_dict.pop(k)
1851 for k in list(state_dict.keys())
1852 if "transformer." in k and any(norm_key in k for norm_key in self._control_lora_supported_norm_keys)
1853 }
1855 transformer = getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer
-> 1856 has_param_with_expanded_shape = self._maybe_expand_transformer_param_shape_or_error_(
1857 transformer, transformer_lora_state_dict, transformer_norm_state_dict
1858 )
1860 if has_param_with_expanded_shape:
1861 logger.info(
1862 "The LoRA weights contain parameters that have different shapes that expected by the transformer. "
1863 "As a result, the state_dict of the transformer has been expanded to match the LoRA parameter shapes. "
1864 "To get a comprehensive list of parameter names that were modified, enable debug logging."
1865 )
File ~/miniconda3/envs/env-p11/lib/python3.11/site-packages/diffusers/loaders/lora_pipeline.py:2333, in FluxLoraLoaderMixin._maybe_expand_transformer_param_shape_or_error_(cls, transformer, lora_state_dict, norm_state_dict, prefix)
2331 module_out_features, module_in_features = module_weight.shape
2332 if out_features < module_out_features or in_features < module_in_features:
-> 2333 raise NotImplementedError(
2334 f"Only LoRAs with input/output features higher than the current module's input/output features "
2335 f"are currently supported. The provided LoRA contains {in_features=} and {out_features=}, which "
2336 f"are lower than {module_in_features=} and {module_out_features=}. If you require support for "
2337 f"this please open an issue at https://github.com/huggingface/diffusers/issues."
2338 )
2340 logger.debug(
2341 f'Expanding the nn.Linear input/output features for module="{name}" because the provided LoRA '
2342 f"checkpoint contains higher number of features than expected. The number of input_features will be "
2343 f"expanded from {module_in_features} to {in_features}, and the number of output features will be "
2344 f"expanded from {module_out_features} to {out_features}."
2345 )
2347 has_param_with_shape_update = True
NotImplementedError: Only LoRAs with input/output features higher than the current module's input/output features are currently supported. The provided LoRA contains in_features=64 and out_features=3072, which are lower than module_in_features=384 and module_out_features=3072. If you require support for this please open an issue at https://github.com/huggingface/diffusers/issues.
Describe the bug
Unable to load and fuse a LoRA whose input/output features are lower than the current module's input/output features; load_lora_weights raises a NotImplementedError. See the example and logs below.
Reproduction
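The original notebook cell is only partially visible in the traceback (lines 46–49), so this is a minimal sketch rather than the author's exact script. It assumes the LoRA is being loaded into a Flux pipeline whose transformer has expanded input features, e.g. FLUX.1-Fill-dev (x_embedder in_features=384), while the FLUX.1-Turbo-Alpha LoRA was trained against the base FLUX.1-dev transformer (in_features=64), matching the shapes reported in the error:

```python
import torch
from diffusers import FluxFillPipeline  # assumption: a Flux variant with an expanded x_embedder

pipe = FluxFillPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16
)

# Load and fuse LoRA (same calls as in the failing cell).
# The Turbo LoRA's shapes (in_features=64) are smaller than the Fill
# transformer's (in_features=384), so load_lora_weights raises
# NotImplementedError instead of expanding/merging the weights.
adapter_id = "alimama-creative/FLUX.1-Turbo-Alpha"
pipe.load_lora_weights(adapter_id)
pipe.fuse_lora()
```

With a pipeline like this, load_lora_weights should fail inside _maybe_expand_transformer_param_shape_or_error_ exactly as shown in the traceback above.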
Logs
System Info
Who can help?
@sayakpaul