Commit 1e8cf27

DN6, sayakpaul, and yiyixuxu authored

[CI] Nightly Test Updates (#9380)

* update
* update
* update
* update
* update

---------

Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
Co-authored-by: YiYi Xu <yixu310@gmail.com>

1 parent 6cf8d98 · commit 1e8cf27

36 files changed: 260 additions, 893 deletions
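The recurring change across these test files is a checkpoint swap: references to the runwayml repos (presumably no longer available on the Hub) are replaced with the community mirrors "Jiali/stable-diffusion-1.5" and "botp/stable-diffusion-v1-5-inpainting". A minimal sketch of the new loading pattern, assuming a CUDA device (the tests themselves use the torch_device helper):

import torch

from diffusers import StableDiffusionPipeline

# Mirror of the former runwayml/stable-diffusion-v1-5 checkpoint,
# as used throughout the updated nightly tests.
pipe = StableDiffusionPipeline.from_pretrained(
    "Jiali/stable-diffusion-1.5", torch_dtype=torch.float16, safety_checker=None
)
pipe.to("cuda")  # the tests use torch_device; "cuda" is assumed here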

tests/lora/test_lora_layers_sd.py

Lines changed: 22 additions & 23 deletions

@@ -20,7 +20,6 @@
 import torch
 import torch.nn as nn
 from huggingface_hub import hf_hub_download
-from huggingface_hub.repocard import RepoCard
 from safetensors.torch import load_file
 from transformers import CLIPTextModel, CLIPTokenizer

@@ -103,7 +102,7 @@ def tearDown(self):
     @slow
     @require_torch_gpu
     def test_integration_move_lora_cpu(self):
-        path = "runwayml/stable-diffusion-v1-5"
+        path = "Jiali/stable-diffusion-1.5"
         lora_id = "takuma104/lora-test-text-encoder-lora-target"

         pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)

@@ -162,7 +161,7 @@ def test_integration_move_lora_cpu(self):
     def test_integration_move_lora_dora_cpu(self):
         from peft import LoraConfig

-        path = "Lykon/dreamshaper-8"
+        path = "Jiali/stable-diffusion-1.5"
         unet_lora_config = LoraConfig(
             init_lora_weights="gaussian",
             target_modules=["to_k", "to_q", "to_v", "to_out.0"],

@@ -222,7 +221,7 @@ def tearDown(self):
         torch.cuda.empty_cache()

     def test_integration_logits_with_scale(self):
-        path = "runwayml/stable-diffusion-v1-5"
+        path = "Jiali/stable-diffusion-1.5"
         lora_id = "takuma104/lora-test-text-encoder-lora-target"

         pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32)

@@ -254,7 +253,7 @@ def test_integration_logits_with_scale(self):
         release_memory(pipe)

     def test_integration_logits_no_scale(self):
-        path = "runwayml/stable-diffusion-v1-5"
+        path = "Jiali/stable-diffusion-1.5"
         lora_id = "takuma104/lora-test-text-encoder-lora-target"

         pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32)

@@ -284,8 +283,8 @@ def test_dreambooth_old_format(self):
         generator = torch.Generator("cpu").manual_seed(0)

         lora_model_id = "hf-internal-testing/lora_dreambooth_dog_example"
-        card = RepoCard.load(lora_model_id)
-        base_model_id = card.data.to_dict()["base_model"]
+
+        base_model_id = "Jiali/stable-diffusion-1.5"

         pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
         pipe = pipe.to(torch_device)

@@ -308,8 +307,8 @@ def test_dreambooth_text_encoder_new_format(self):
         generator = torch.Generator().manual_seed(0)

         lora_model_id = "hf-internal-testing/lora-trained"
-        card = RepoCard.load(lora_model_id)
-        base_model_id = card.data.to_dict()["base_model"]
+
+        base_model_id = "Jiali/stable-diffusion-1.5"

         pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
         pipe = pipe.to(torch_device)

@@ -420,7 +419,7 @@ def test_a1111_with_sequential_cpu_offload(self):
     def test_kohya_sd_v15_with_higher_dimensions(self):
         generator = torch.Generator().manual_seed(0)

-        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to(
+        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None).to(
             torch_device
         )
         lora_model_id = "hf-internal-testing/urushisato-lora"

@@ -444,8 +443,8 @@ def test_vanilla_funetuning(self):
         generator = torch.Generator().manual_seed(0)

         lora_model_id = "hf-internal-testing/sd-model-finetuned-lora-t4"
-        card = RepoCard.load(lora_model_id)
-        base_model_id = card.data.to_dict()["base_model"]
+
+        base_model_id = "Jiali/stable-diffusion-1.5"

         pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
         pipe = pipe.to(torch_device)

@@ -468,7 +467,7 @@ def test_unload_kohya_lora(self):
         prompt = "masterpiece, best quality, mountain"
         num_inference_steps = 2

-        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to(
+        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None).to(
             torch_device
         )
         initial_images = pipe(

@@ -506,7 +505,7 @@ def test_load_unload_load_kohya_lora(self):
         prompt = "masterpiece, best quality, mountain"
         num_inference_steps = 2

-        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None).to(
+        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", safety_checker=None).to(
             torch_device
         )
         initial_images = pipe(

@@ -548,9 +547,9 @@ def test_load_unload_load_kohya_lora(self):

     def test_not_empty_state_dict(self):
         # Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again
-        pipe = AutoPipelineForText2Image.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
-        ).to(torch_device)
+        pipe = AutoPipelineForText2Image.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16).to(
+            torch_device
+        )
         pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

         cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors")

@@ -562,9 +561,9 @@ def test_not_empty_state_dict(self):

     def test_load_unload_load_state_dict(self):
         # Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again
-        pipe = AutoPipelineForText2Image.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
-        ).to(torch_device)
+        pipe = AutoPipelineForText2Image.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16).to(
+            torch_device
+        )
         pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

         cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors")

@@ -581,7 +580,7 @@ def test_load_unload_load_state_dict(self):
         release_memory(pipe)

     def test_sdv1_5_lcm_lora(self):
-        pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
+        pipe = DiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16)
         pipe.to(torch_device)
         pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

@@ -609,7 +608,7 @@ def test_sdv1_5_lcm_lora(self):
         release_memory(pipe)

     def test_sdv1_5_lcm_lora_img2img(self):
-        pipe = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
+        pipe = AutoPipelineForImage2Image.from_pretrained("Jiali/stable-diffusion-1.5", torch_dtype=torch.float16)
         pipe.to(torch_device)
         pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

@@ -650,7 +649,7 @@ def test_sd_load_civitai_empty_network_alpha(self):
         This test simply checks that loading a LoRA with an empty network alpha works fine
         See: https://github.com/huggingface/diffusers/issues/5606
         """
-        pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+        pipeline = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5")
         pipeline.enable_sequential_cpu_offload()
         civitai_path = hf_hub_download("ybelkada/test-ahi-civitai", "ahi_lora_weights.safetensors")
         pipeline.load_lora_weights(civitai_path, adapter_name="ahri")
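Note the pattern change in the DreamBooth and fine-tuning tests above: rather than resolving the base checkpoint from the LoRA repo's model card via RepoCard.load, the tests now pin the mirror id directly (hence the dropped huggingface_hub.repocard import). A minimal sketch of the resulting flow, using a lora repo id that appears in the hunks above:

from diffusers import StableDiffusionPipeline

# Previously: base_model_id = RepoCard.load(lora_model_id).data.to_dict()["base_model"]
# Now the base checkpoint is hard-coded to the mirror.
base_model_id = "Jiali/stable-diffusion-1.5"
lora_model_id = "hf-internal-testing/lora-trained"

pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
pipe.load_lora_weights(lora_model_id)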

tests/models/autoencoders/test_models_vae.py

Lines changed: 3 additions & 3 deletions

@@ -1051,7 +1051,7 @@ def test_encode_decode(self):

     def test_sd(self):
         vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder")  # TODO - update
-        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", vae=vae, safety_checker=None)
+        pipe = StableDiffusionPipeline.from_pretrained("Jiali/stable-diffusion-1.5", vae=vae, safety_checker=None)
         pipe.to(torch_device)

         out = pipe(

@@ -1099,7 +1099,7 @@ def test_sd_f16(self):
             "openai/consistency-decoder", torch_dtype=torch.float16
         )  # TODO - update
         pipe = StableDiffusionPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5",
+            "Jiali/stable-diffusion-1.5",
             torch_dtype=torch.float16,
             vae=vae,
             safety_checker=None,

@@ -1124,7 +1124,7 @@
     def test_vae_tiling(self):
         vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
         pipe = StableDiffusionPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", vae=vae, safety_checker=None, torch_dtype=torch.float16
+            "Jiali/stable-diffusion-1.5", vae=vae, safety_checker=None, torch_dtype=torch.float16
        )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
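For reference, the consistency-decoder tests above all share one construction: load ConsistencyDecoderVAE separately and inject it into the pipeline through the vae argument. A sketch assembled from the hunks, with the new mirror id:

import torch

from diffusers import ConsistencyDecoderVAE, StableDiffusionPipeline

vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained(
    "Jiali/stable-diffusion-1.5", vae=vae, safety_checker=None, torch_dtype=torch.float16
)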

tests/models/unets/test_models_unet_2d_condition.py

Lines changed: 4 additions & 4 deletions

@@ -1376,7 +1376,7 @@ def test_compvis_sd_v1_4_fp16(self, seed, timestep, expected_slice):
     @require_torch_accelerator
     @skip_mps
     def test_compvis_sd_v1_5(self, seed, timestep, expected_slice):
-        model = self.get_unet_model(model_id="runwayml/stable-diffusion-v1-5")
+        model = self.get_unet_model(model_id="Jiali/stable-diffusion-1.5")
         latents = self.get_latents(seed)
         encoder_hidden_states = self.get_encoder_hidden_states(seed)

@@ -1404,7 +1404,7 @@
     )
     @require_torch_accelerator_with_fp16
     def test_compvis_sd_v1_5_fp16(self, seed, timestep, expected_slice):
-        model = self.get_unet_model(model_id="runwayml/stable-diffusion-v1-5", fp16=True)
+        model = self.get_unet_model(model_id="Jiali/stable-diffusion-1.5", fp16=True)
         latents = self.get_latents(seed, fp16=True)
         encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

@@ -1433,7 +1433,7 @@ def test_compvis_sd_v1_5_fp16(self, seed, timestep, expected_slice):
     @require_torch_accelerator
     @skip_mps
     def test_compvis_sd_inpaint(self, seed, timestep, expected_slice):
-        model = self.get_unet_model(model_id="runwayml/stable-diffusion-inpainting")
+        model = self.get_unet_model(model_id="botp/stable-diffusion-v1-5-inpainting")
         latents = self.get_latents(seed, shape=(4, 9, 64, 64))
         encoder_hidden_states = self.get_encoder_hidden_states(seed)

@@ -1461,7 +1461,7 @@
     )
     @require_torch_accelerator_with_fp16
     def test_compvis_sd_inpaint_fp16(self, seed, timestep, expected_slice):
-        model = self.get_unet_model(model_id="runwayml/stable-diffusion-inpainting", fp16=True)
+        model = self.get_unet_model(model_id="botp/stable-diffusion-v1-5-inpainting", fp16=True)
         latents = self.get_latents(seed, shape=(4, 9, 64, 64), fp16=True)
         encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
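The get_unet_model helper these tests call is not part of this diff; what the swapped ids point at can be sketched with the standard from_pretrained + subfolder API (an assumption about what the helper does internally, not something the hunks show):

from diffusers import UNet2DConditionModel

# Text-to-image UNet from the v1-5 mirror.
unet = UNet2DConditionModel.from_pretrained("Jiali/stable-diffusion-1.5", subfolder="unet")

# 9-channel inpainting UNet from the inpainting mirror; the tests feed it
# latents of shape (4, 9, 64, 64), matching the extra mask/image channels.
inpaint_unet = UNet2DConditionModel.from_pretrained("botp/stable-diffusion-v1-5-inpainting", subfolder="unet")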

tests/pipelines/amused/test_amused.py

Lines changed: 15 additions & 26 deletions

@@ -13,15 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-
 import unittest

 import numpy as np
 import torch
 from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

 from diffusers import AmusedPipeline, AmusedScheduler, UVit2DModel, VQModel
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin

@@ -65,19 +69,15 @@ def get_dummy_components(self):
         vqvae = VQModel(
             act_fn="silu",
             block_out_channels=[8],
-            down_block_types=[
-                "DownEncoderBlock2D",
-            ],
+            down_block_types=["DownEncoderBlock2D"],
             in_channels=3,
             latent_channels=8,
             layers_per_block=1,
             norm_num_groups=8,
             num_vq_embeddings=8,
             out_channels=3,
             sample_size=8,
-            up_block_types=[
-                "UpDecoderBlock2D",
-            ],
+            up_block_types=["UpDecoderBlock2D"],
             mid_block_add_attention=False,
             lookup_from_codebook=True,
         )

@@ -96,7 +96,6 @@ def get_dummy_components(self):
         )
         text_encoder = CLIPTextModelWithProjection(text_encoder_config)
         tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
-
         components = {
             "transformer": transformer,
             "scheduler": scheduler,

@@ -135,47 +134,37 @@ class AmusedPipelineSlowTests(unittest.TestCase):
     def test_amused_256(self):
         pipe = AmusedPipeline.from_pretrained("amused/amused-256")
         pipe.to(torch_device)
-
         image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images
-
         image_slice = image[0, -3:, -3:, -1].flatten()
-
         assert image.shape == (1, 256, 256, 3)
-        expected_slice = np.array([0.4011, 0.3992, 0.3790, 0.3856, 0.3772, 0.3711, 0.3919, 0.3850, 0.3625])
-        assert np.abs(image_slice - expected_slice).max() < 3e-3
+        expected_slice = np.array([0.4011, 0.3992, 0.379, 0.3856, 0.3772, 0.3711, 0.3919, 0.385, 0.3625])
+        assert np.abs(image_slice - expected_slice).max() < 0.003

     def test_amused_256_fp16(self):
         pipe = AmusedPipeline.from_pretrained("amused/amused-256", variant="fp16", torch_dtype=torch.float16)
         pipe.to(torch_device)
-
         image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images
-
         image_slice = image[0, -3:, -3:, -1].flatten()
-
         assert image.shape == (1, 256, 256, 3)
         expected_slice = np.array([0.0554, 0.05129, 0.0344, 0.0452, 0.0476, 0.0271, 0.0495, 0.0527, 0.0158])
-        assert np.abs(image_slice - expected_slice).max() < 7e-3
+        assert np.abs(image_slice - expected_slice).max() < 0.007

     def test_amused_512(self):
         pipe = AmusedPipeline.from_pretrained("amused/amused-512")
         pipe.to(torch_device)
-
         image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images
-
         image_slice = image[0, -3:, -3:, -1].flatten()

         assert image.shape == (1, 512, 512, 3)
-        expected_slice = np.array([0.9960, 0.9960, 0.9946, 0.9980, 0.9947, 0.9932, 0.9960, 0.9961, 0.9947])
-        assert np.abs(image_slice - expected_slice).max() < 3e-3
+        expected_slice = np.array([0.1199, 0.1171, 0.1229, 0.1188, 0.1210, 0.1147, 0.1260, 0.1346, 0.1152])
+        assert np.abs(image_slice - expected_slice).max() < 0.003

     def test_amused_512_fp16(self):
         pipe = AmusedPipeline.from_pretrained("amused/amused-512", variant="fp16", torch_dtype=torch.float16)
         pipe.to(torch_device)
-
         image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images
-
         image_slice = image[0, -3:, -3:, -1].flatten()

         assert image.shape == (1, 512, 512, 3)
-        expected_slice = np.array([0.9983, 1.0, 1.0, 1.0, 1.0, 0.9989, 0.9994, 0.9976, 0.9977])
-        assert np.abs(image_slice - expected_slice).max() < 3e-3
+        expected_slice = np.array([0.1509, 0.1492, 0.1531, 0.1485, 0.1501, 0.1465, 0.1581, 0.1690, 0.1499])
+        assert np.abs(image_slice - expected_slice).max() < 0.003
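The amused updates replace stale expected slices (the old 512px values were near-saturated around 0.99) and spell tolerances as plain decimals. A standalone sketch of the slice-check pattern these tests use, with the updated amused-512 values from the hunk above and a CUDA device assumed in place of torch_device:

import numpy as np
import torch

from diffusers import AmusedPipeline

pipe = AmusedPipeline.from_pretrained("amused/amused-512").to("cuda")  # tests use torch_device
image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images

# Compare a 3x3 corner of the output against the pinned reference values.
image_slice = image[0, -3:, -3:, -1].flatten()
expected_slice = np.array([0.1199, 0.1171, 0.1229, 0.1188, 0.1210, 0.1147, 0.1260, 0.1346, 0.1152])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice - expected_slice).max() < 0.003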
