@@ -20,7 +20,6 @@
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
-from huggingface_hub.repocard import RepoCard
from safetensors.torch import load_file
from transformers import CLIPTextModel, CLIPTokenizer

@@ -103,7 +102,7 @@ def tearDown(self):
    @slow
    @require_torch_gpu
    def test_integration_move_lora_cpu(self):
-        path = "runwayml/stable-diffusion-v1-5"
+        path = "Jiali/stable-diffusion-1.5"
        lora_id = "takuma104/lora-test-text-encoder-lora-target"

        pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
@@ -162,7 +161,7 @@ def test_integration_move_lora_cpu(self):
    def test_integration_move_lora_dora_cpu(self):
        from peft import LoraConfig

-        path = "Lykon/dreamshaper-8"
+        path = "Jiali/stable-diffusion-1.5"
        unet_lora_config = LoraConfig(
            init_lora_weights="gaussian",
            target_modules=["to_k", "to_q", "to_v", "to_out.0"],
@@ -222,7 +221,7 @@ def tearDown(self):
        torch.cuda.empty_cache()

    def test_integration_logits_with_scale(self):
-        path = "runwayml/stable-diffusion-v1-5"
+        path = "Jiali/stable-diffusion-1.5"
        lora_id = "takuma104/lora-test-text-encoder-lora-target"

        pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32)
@@ -254,7 +253,7 @@ def test_integration_logits_with_scale(self):
        release_memory(pipe)

    def test_integration_logits_no_scale(self):
-        path = "runwayml/stable-diffusion-v1-5"
+        path = "Jiali/stable-diffusion-1.5"
        lora_id = "takuma104/lora-test-text-encoder-lora-target"

        pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32)
@@ -284,8 +283,8 @@ def test_dreambooth_old_format(self):
        generator = torch.Generator("cpu").manual_seed(0)

        lora_model_id = "hf-internal-testing/lora_dreambooth_dog_example"
-        card = RepoCard.load(lora_model_id)
-        base_model_id = card.data.to_dict()["base_model"]
+
+        base_model_id = "Jiali/stable-diffusion-1.5"

        pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
        pipe = pipe.to(torch_device)
@@ -308,8 +307,8 @@ def test_dreambooth_text_encoder_new_format(self):
        generator = torch.Generator().manual_seed(0)

        lora_model_id = "hf-internal-testing/lora-trained"
-        card = RepoCard.load(lora_model_id)
-        base_model_id = card.data.to_dict()["base_model"]
+
+        base_model_id = "Jiali/stable-diffusion-1.5"

        pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
        pipe = pipe.to(torch_device)
@@ -420,7 +419,7 @@ def test_a1111_with_sequential_cpu_offload(self):
    def test_kohya_sd_v15_with_higher_dimensions(self):
        generator = torch.Generator().manual_seed(0)

-        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to(
+        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None).to(
            torch_device
        )
        lora_model_id = "hf-internal-testing/urushisato-lora"
@@ -444,8 +443,8 @@ def test_vanilla_funetuning(self):
        generator = torch.Generator().manual_seed(0)

        lora_model_id = "hf-internal-testing/sd-model-finetuned-lora-t4"
-        card = RepoCard.load(lora_model_id)
-        base_model_id = card.data.to_dict()["base_model"]
+
+        base_model_id = "Jiali/stable-diffusion-1.5"

        pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
        pipe = pipe.to(torch_device)
@@ -468,7 +467,7 @@ def test_unload_kohya_lora(self):
        prompt = "masterpiece, best quality, mountain"
        num_inference_steps = 2

-        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to(
+        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None).to(
            torch_device
        )
        initial_images = pipe(
@@ -506,7 +505,7 @@ def test_load_unload_load_kohya_lora(self):
        prompt = "masterpiece, best quality, mountain"
        num_inference_steps = 2

-        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to(
+        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None).to(
            torch_device
        )
        initial_images = pipe(
@@ -548,9 +547,9 @@ def test_load_unload_load_kohya_lora(self):

    def test_not_empty_state_dict(self):
        # Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again
-        pipe = AutoPipelineForText2Image.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
-        ).to(torch_device)
+        pipe = AutoPipelineForText2Image.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16).to(
+            torch_device
+        )
        pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

        cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors")
@@ -562,9 +561,9 @@ def test_not_empty_state_dict(self):

    def test_load_unload_load_state_dict(self):
        # Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again
-        pipe = AutoPipelineForText2Image.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
-        ).to(torch_device)
+        pipe = AutoPipelineForText2Image.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16).to(
+            torch_device
+        )
        pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

        cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors")
@@ -581,7 +580,7 @@ def test_load_unload_load_state_dict(self):
        release_memory(pipe)

    def test_sdv1_5_lcm_lora(self):
-        pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
+        pipe = DiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
@@ -609,7 +608,7 @@ def test_sdv1_5_lcm_lora(self):
        release_memory(pipe)

    def test_sdv1_5_lcm_lora_img2img(self):
-        pipe = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
+        pipe = AutoPipelineForImage2Image.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
@@ -650,7 +649,7 @@ def test_sd_load_civitai_empty_network_alpha(self):
        This test simply checks that loading a LoRA with an empty network alpha works fine
        See: https://github.com/huggingface/diffusers/issues/5606
        """
-        pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+        pipeline = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
        pipeline.enable_sequential_cpu_offload()
        civitai_path = hf_hub_download("ybelkada/test-ahi-civitai", "ahi_lora_weights.safetensors")
        pipeline.load_lora_weights(civitai_path, adapter_name="ahri")