@@ -246,10 +246,8 @@ def lora_state_dict(
         r"""
         Return state dict for lora weights and the network alphas.
 
-        > [!WARNING]
-        > We support loading A1111 formatted LoRA checkpoints in a limited capacity.
-        >
-        > This function is experimental and might change in the future.
+        > [!WARNING] > We support loading A1111 formatted LoRA checkpoints in a limited capacity. > > This function is
+        experimental and might change in the future.
 
         Parameters:
             pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
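For context on the API whose docstring this hunk touches: `lora_state_dict` is a classmethod that resolves a LoRA checkpoint (local path, Hub repo id, or an already-loaded dict) into the raw state dict plus the network alphas, without applying anything to a model. A minimal sketch, assuming a placeholder LoRA repo id and the default safetensors weight name:

```python
from diffusers import StableDiffusionPipeline

# Fetch the LoRA tensors and alphas without loading them into the pipeline.
# "some-user/some-lora" is a placeholder repo id used only for illustration.
state_dict, network_alphas = StableDiffusionPipeline.lora_state_dict(
    "some-user/some-lora", weight_name="pytorch_lora_weights.safetensors"
)
print(f"{len(state_dict)} LoRA tensors, alphas: {network_alphas}")
```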
@@ -542,8 +540,7 @@ def fuse_lora(
         r"""
         Fuses the LoRA parameters into the original parameters of the corresponding blocks.
 
-        > [!WARNING]
-        > This is an experimental API.
+        > [!WARNING] > This is an experimental API.
 
         Args:
             components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
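The `fuse_lora` method documented here merges loaded LoRA deltas directly into the base weights so inference carries no extra LoRA matmuls. A rough usage sketch, with placeholder model and LoRA repo ids:

```python
import torch
from diffusers import StableDiffusionPipeline

# Placeholder ids: substitute a real base model and LoRA checkpoint.
pipe = StableDiffusionPipeline.from_pretrained(
    "some-org/some-base-model", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("some-user/some-lora")

# Fold the LoRA update into the UNet / text encoder weights; lora_scale
# controls how strongly the LoRA delta is applied before fusing.
pipe.fuse_lora(lora_scale=0.7)
image = pipe("an example prompt").images[0]
```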
@@ -580,8 +577,7 @@ def unfuse_lora(self, components: List[str] = ["unet", "text_encoder"], **kwargs
         Reverses the effect of
         [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
 
-        > [!WARNING]
-        > This is an experimental API.
+        > [!WARNING] > This is an experimental API.
 
         Args:
             components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
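`unfuse_lora` restores the original, pre-fusion weights, which is the usual way to swap LoRAs after a `fuse_lora` call. Continuing the sketch above (the repo id is again a placeholder):

```python
# Undo the in-place fusion so the base weights are restored, then a
# different LoRA can be loaded.
pipe.unfuse_lora()
pipe.load_lora_weights("some-user/another-lora")
```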
@@ -689,10 +685,8 @@ def lora_state_dict(
         r"""
         Return state dict for lora weights and the network alphas.
 
-        > [!WARNING]
-        > We support loading A1111 formatted LoRA checkpoints in a limited capacity.
-        >
-        > This function is experimental and might change in the future.
+        > [!WARNING] > We support loading A1111 formatted LoRA checkpoints in a limited capacity. > > This function is
+        experimental and might change in the future.
 
         Parameters:
             pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
@@ -1995,8 +1989,7 @@ def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], *
         Reverses the effect of
         [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
 
-        > [!WARNING]
-        > This is an experimental API.
+        > [!WARNING] > This is an experimental API.
 
         Args:
             components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.