Diffuser test #2141

Draft · wants to merge 3 commits into main

1 change: 1 addition & 0 deletions requirements/test.txt
@@ -19,6 +19,7 @@ xlsxwriter # thunder/benchmarks/test_benchmark_litgpt.py
jsonargparse # thunder/benchmarks/benchmark_litgpt.py
bitsandbytes==0.42.0 # fixed version!
transformers==4.50.3 # for test_networks.py
diffusers==0.33.0 # for test_networks.py

# Installs JAX on Linux and MacOS
jaxlib; sys_platform == 'linux' or sys_platform == 'darwin' # required for jax, see https://github.com/google/jax#installation
83 changes: 83 additions & 0 deletions thunder/tests/test_networks.py
@@ -695,3 +695,86 @@ def test_hf_kvcache():

    assert_close(j_static_cache.key_cache, ref_static_cache.key_cache, rtol=1e-1, atol=1e-1)
    assert_close(j_static_cache.value_cache, ref_static_cache.value_cache, rtol=1e-1, atol=1e-1)


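# Hugging Face checkpoints whose UNet2DConditionModel configurations are exercised
# below, including SDXL variants that require added conditioning inputs.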
hf_diffusers_unet2d_condition_model_ids = [
    "runwayml/stable-diffusion-v1-5",
    "CompVis/stable-diffusion-v1-4",
    "ionet-official/bc8-alpha",
    "stabilityai/sd-turbo",
    "runwayml/stable-diffusion-inpainting",
    "stabilityai/stable-diffusion-xl-base-1.0",
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
]


@thunder.tests.framework.requiresCUDA
@pytest.mark.parametrize("model_id", hf_diffusers_unet2d_condition_model_ids)
def test_hf_diffusers(model_id):
    from thunder.dynamo import thunderfx
    from diffusers import UNet2DConditionModel

    torch.manual_seed(0)

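    # Some Hub repos do not publish safetensors weights; from_pretrained then
    # raises OSError, so retry with use_safetensors=False to load the .bin weights.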
    try:
        unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet", torch_dtype=torch.bfloat16)
    except OSError:
        unet = UNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", use_safetensors=False, torch_dtype=torch.bfloat16
        )

    config = unet.config
    in_channels = config.in_channels
    cross_attention_dim = config.cross_attention_dim
    addition_embed_type = config.addition_embed_type

    sample_size = 16
    batch_size = 1
    seq_length = 8

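    # SDXL-style models condition on extra inputs: flattened "time ids" and a pooled
    # text embedding. The dims below are sized so that
    # time_ids_dim * addition_time_embed_dim + text_embeds_dim matches the model's
    # projection_class_embeddings_input_dim.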
if "xl" in model_id:
time_ids_dim = 6
text_embeds_dim = 1280
if "refiner" in model_id:
time_ids_dim = 2
text_embeds_dim = 2048
else:
time_ids_dim = None
text_embeds_dim = None

    input_shape = (batch_size, in_channels, sample_size, sample_size)
    hidden_states_shape = (batch_size, seq_length, cross_attention_dim)

    unet = unet.to("cuda", dtype=torch.bfloat16).requires_grad_(True)
    compiled_model = thunderfx(unet)

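    # Build random inputs matching the model's forward signature:
    # (sample, timestep, encoder_hidden_states), plus added_cond_kwargs for
    # SDXL-style models.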
    def make_inputs(dtype=torch.bfloat16):
        added_cond_kwargs = {}
        with torch.device("cuda"):
            input = torch.randn(input_shape, dtype=dtype)
            hidden_states = torch.randn(hidden_states_shape, dtype=dtype)
            timestep = torch.ones(batch_size, dtype=torch.long)
            if addition_embed_type is not None:
                assert text_embeds_dim is not None and time_ids_dim is not None
                time_ids_shape = (batch_size, time_ids_dim)
                text_embeds_shape = (batch_size, text_embeds_dim)
                added_cond_kwargs["time_ids"] = torch.randn(time_ids_shape, device="cuda", dtype=dtype)
                added_cond_kwargs["text_embeds"] = torch.randn(text_embeds_shape, device="cuda", dtype=dtype)
        return (input, timestep, hidden_states), {"added_cond_kwargs": added_cond_kwargs}

    compiled_args, compiled_kwargs = make_inputs(torch.bfloat16)
    compiled_output = compiled_model(*compiled_args, **compiled_kwargs)

    ref_output = unet(*compiled_args, **compiled_kwargs)

    ref_output = ref_output.sample
    compiled_output = compiled_output.sample

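    # bfloat16 carries only ~7 mantissa bits, so compare with loose tolerances.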
    torch.testing.assert_close(compiled_output, ref_output, rtol=1e-2, atol=2e-1)

    # TODO: Currently fails, needs investigation https://github.com/Lightning-AI/lightning-thunder/issues/2153
    # loss_grad = torch.randn_like(compiled_output)
    # grads_ref = torch.autograd.grad(ref_output, unet.parameters(), grad_outputs=loss_grad)
    # grads_compiled = torch.autograd.grad(compiled_output, unet.parameters(), grad_outputs=loss_grad)
    # torch.testing.assert_close(grads_ref, grads_compiled, rtol=1e-1, atol=1e-1)
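For context, this is the thunderfx usage pattern the test exercises: wrap the module, call the wrapper with the module's usual arguments, and compare against eager execution. A minimal sketch with a toy module standing in for the UNet (the Linear layer, shapes, and tolerances are illustrative placeholders, not part of this PR):

import torch
from thunder.dynamo import thunderfx

# Toy stand-in for UNet2DConditionModel; illustrative only.
model = torch.nn.Linear(8, 8).to("cuda", dtype=torch.bfloat16)
compiled = thunderfx(model)  # compiled callable with the same signature as `model`

x = torch.randn(1, 8, device="cuda", dtype=torch.bfloat16)
torch.testing.assert_close(compiled(x), model(x), rtol=1e-2, atol=2e-1)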