diff --git a/src/diffusers/loaders/lora_conversion_utils.py b/src/diffusers/loaders/lora_conversion_utils.py
index 8f7309d4ed1e..e7584b071a68 100644
--- a/src/diffusers/loaders/lora_conversion_utils.py
+++ b/src/diffusers/loaders/lora_conversion_utils.py
@@ -2321,8 +2321,16 @@ def _convert_non_diffusers_flux2_lora_to_diffusers(state_dict):
     prefix = "diffusion_model."
     original_state_dict = {k[len(prefix) :]: v for k, v in state_dict.items()}
 
-    num_double_layers = 8
-    num_single_layers = 48
+    # Infer the layer counts from the checkpoint keys ("double_blocks.<i>...." /
+    # "single_blocks.<i>....") instead of hard-coding 8/48, so non-default depths convert too.
+    num_double_layers = 0
+    num_single_layers = 0
+    for key in original_state_dict:
+        if key.startswith("single_blocks."):
+            num_single_layers = max(num_single_layers, int(key.split(".")[1]) + 1)
+        elif key.startswith("double_blocks."):
+            num_double_layers = max(num_double_layers, int(key.split(".")[1]) + 1)
+
     lora_keys = ("lora_A", "lora_B")
     attn_types = ("img_attn", "txt_attn")
 