Skip to content

Commit 2fba12d

Browse files
chengzeyi and sayakpaul
committed
[Flux] Optimize guidance creation in flux pipeline by moving it outside the loop (#9153)
* optimize guidance creation in flux pipeline by moving it outside the loop * use torch.full instead of torch.tensor to create a tensor with a single value --------- Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
1 parent 7269dda commit 2fba12d

File tree

1 file changed

+7
-7
lines changed

1 file changed

+7
-7
lines changed

src/diffusers/pipelines/flux/pipeline_flux.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -677,6 +677,13 @@ def __call__(
677677
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
678678
self._num_timesteps = len(timesteps)
679679

680+
# handle guidance
681+
if self.transformer.config.guidance_embeds:
682+
guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32)
683+
guidance = guidance.expand(latents.shape[0])
684+
else:
685+
guidance = None
686+
680687
# 6. Denoising loop
681688
with self.progress_bar(total=num_inference_steps) as progress_bar:
682689
for i, t in enumerate(timesteps):
@@ -686,13 +693,6 @@ def __call__(
686693
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
687694
timestep = t.expand(latents.shape[0]).to(latents.dtype)
688695

689-
# handle guidance
690-
if self.transformer.config.guidance_embeds:
691-
guidance = torch.tensor([guidance_scale], device=device)
692-
guidance = guidance.expand(latents.shape[0])
693-
else:
694-
guidance = None
695-
696696
noise_pred = self.transformer(
697697
hidden_states=latents,
698698
# YiYi notes: divide it by 1000 for now because we scale it by 1000 in the transformer model (we should not keep it but I want to keep the inputs same for the model for testing)

0 commit comments

Comments (0)