5 changes: 2 additions & 3 deletions .github/workflows/pr_quality.yml
@@ -27,9 +27,8 @@ jobs:
pip install .[quality]
- name: Check quality
run: |
-black --check examples tests src utils scripts
-ruff examples tests src utils scripts
-doc-builder style src/diffusers docs/source --max_len 119 --check_only --path_to_docs docs/source
+ruff check examples tests src utils scripts
+ruff format examples tests src utils scripts --check

check_repository_consistency:
runs-on: ubuntu-latest
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
@@ -410,7 +410,7 @@ Diffusers has grown a lot. Here is the command for it:
$ make test
```

-🧨 Diffusers relies on `black` and `isort` to format its source code
+🧨 Diffusers relies on `ruff` and `isort` to format its source code
consistently. After you make changes, apply automatic style corrections and code verifications
that can't be automated in one go with:

14 changes: 6 additions & 8 deletions Makefile
@@ -9,8 +9,8 @@ modified_only_fixup:
$(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs)))
@if test -n "$(modified_py_files)"; then \
echo "Checking/fixing $(modified_py_files)"; \
-black $(modified_py_files); \
-ruff $(modified_py_files); \
+ruff check $(modified_py_files) --fix; \
+ruff format $(modified_py_files);\
else \
echo "No library .py files were modified"; \
fi
@@ -40,23 +40,21 @@ repo-consistency:
# this target runs checks on all files

quality:
-black --check $(check_dirs)
-ruff $(check_dirs)
-doc-builder style src/diffusers docs/source --max_len 119 --check_only --path_to_docs docs/source
+ruff check $(check_dirs) setup.py
+ruff format --check $(check_dirs) setup.py
python utils/check_doc_toc.py

# Format source code automatically and check is there are any problems left that need manual fixing

extra_style_checks:
python utils/custom_init_isort.py
-doc-builder style src/diffusers docs/source --max_len 119 --path_to_docs docs/source
python utils/check_doc_toc.py --fix_and_overwrite

# this target runs checks on all files and potentially modifies some of them

style:
-black $(check_dirs)
-ruff $(check_dirs) --fix
+ruff check $(check_dirs) setup.py --fix
+ruff format $(check_dirs) setup.py
${MAKE} autogenerate_code
${MAKE} extra_style_checks

1 change: 1 addition & 0 deletions examples/community/composable_stable_diffusion.py
@@ -65,6 +65,7 @@ class ComposableStableDiffusionPipeline(DiffusionPipeline):
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
+
_optional_components = ["safety_checker", "feature_extractor"]

def __init__(
4 changes: 1 addition & 3 deletions examples/community/latent_consistency_img2img.py
@@ -564,9 +564,7 @@ def __init__(
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
-self.betas = (
-torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
-)
+self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps)
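For readers skimming the `scaled_linear` branch being reformatted above: it takes a linear spacing in square-root space and then squares it, so the endpoints still land on `beta_start` and `beta_end` (up to float rounding). A minimal standalone sketch, with numeric values assumed purely for illustration:

```python
import torch

# "scaled_linear": interpolate linearly between sqrt(beta_start) and sqrt(beta_end), then square.
beta_start, beta_end, num_train_timesteps = 0.00085, 0.012, 1000  # illustrative values
betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2

# The squared schedule still starts and ends at the configured betas.
assert torch.isclose(betas[0], torch.tensor(beta_start))
assert torch.isclose(betas[-1], torch.tensor(beta_end))
```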
4 changes: 1 addition & 3 deletions examples/community/latent_consistency_txt2img.py
@@ -469,9 +469,7 @@ def __init__(
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
-self.betas = (
-torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
-)
+self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps)
10 changes: 5 additions & 5 deletions examples/community/lpw_stable_diffusion.py
@@ -56,10 +56,10 @@ def parse_prompt_attention(text):
(abc) - increases attention to abc by a multiplier of 1.1
(abc:3.12) - increases attention to abc by a multiplier of 3.12
[abc] - decreases attention to abc by a multiplier of 1.1
-\( - literal character '('
-\[ - literal character '['
-\) - literal character ')'
-\] - literal character ']'
+\\( - literal character '('
+\\[ - literal character '['
+\\) - literal character ')'
+\\] - literal character ']'
\\ - literal character '\'
anything else - just text
>>> parse_prompt_attention('normal text')
@@ -68,7 +68,7 @@ def parse_prompt_attention(text):
[['an ', 1.0], ['important', 1.1], [' word', 1.0]]
>>> parse_prompt_attention('(unbalanced')
[['unbalanced', 1.1]]
->>> parse_prompt_attention('\(literal\]')
+>>> parse_prompt_attention('\\(literal\\]')
[['(literal]', 1.0]]
>>> parse_prompt_attention('(unnecessary)(parens)')
[['unnecessaryparens', 1.1]]
11 changes: 6 additions & 5 deletions examples/community/lpw_stable_diffusion_onnx.py
@@ -82,10 +82,10 @@ def parse_prompt_attention(text):
(abc) - increases attention to abc by a multiplier of 1.1
(abc:3.12) - increases attention to abc by a multiplier of 3.12
[abc] - decreases attention to abc by a multiplier of 1.1
-\( - literal character '('
-\[ - literal character '['
-\) - literal character ')'
-\] - literal character ']'
+\\( - literal character '('
+\\[ - literal character '['
+\\) - literal character ')'
+\\] - literal character ']'
\\ - literal character '\'
anything else - just text
>>> parse_prompt_attention('normal text')
@@ -94,7 +94,7 @@ def parse_prompt_attention(text):
[['an ', 1.0], ['important', 1.1], [' word', 1.0]]
>>> parse_prompt_attention('(unbalanced')
[['unbalanced', 1.1]]
->>> parse_prompt_attention('\(literal\]')
+>>> parse_prompt_attention('\\(literal\\]')
[['(literal]', 1.0]]
>>> parse_prompt_attention('(unnecessary)(parens)')
[['unnecessaryparens', 1.1]]
@@ -433,6 +433,7 @@ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
"""
+
if version.parse(version.parse(diffusers.__version__).base_version) >= version.parse("0.9.0"):

def __init__(
10 changes: 5 additions & 5 deletions examples/community/lpw_stable_diffusion_xl.py
@@ -46,10 +46,10 @@ def parse_prompt_attention(text):
(abc) - increases attention to abc by a multiplier of 1.1
(abc:3.12) - increases attention to abc by a multiplier of 3.12
[abc] - decreases attention to abc by a multiplier of 1.1
-\( - literal character '('
-\[ - literal character '['
-\) - literal character ')'
-\] - literal character ']'
+\\( - literal character '('
+\\[ - literal character '['
+\\) - literal character ')'
+\\] - literal character ']'
\\ - literal character '\'
anything else - just text

@@ -59,7 +59,7 @@ def parse_prompt_attention(text):
[['an ', 1.0], ['important', 1.1], [' word', 1.0]]
>>> parse_prompt_attention('(unbalanced')
[['unbalanced', 1.1]]
->>> parse_prompt_attention('\(literal\]')
+>>> parse_prompt_attention('\\(literal\\]')
[['(literal]', 1.0]]
>>> parse_prompt_attention('(unnecessary)(parens)')
[['unnecessaryparens', 1.1]]
6 changes: 3 additions & 3 deletions examples/community/magic_mix.py
@@ -127,9 +127,9 @@ def __call__(
timesteps=t,
)

-input = (mix_factor * latents) + (
-1 - mix_factor
-) * orig_latents # interpolating between layout noise and conditionally generated noise to preserve layout sematics
+input = (
+(mix_factor * latents) + (1 - mix_factor) * orig_latents
+) # interpolating between layout noise and conditionally generated noise to preserve layout sematics
input = torch.cat([input] * 2)

else: # content generation phase
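The statement being reformatted above is a plain linear interpolation between two latent batches, used so the layout structure survives into the conditioned sample. A standalone sketch, where the tensor shapes and mix factor are illustrative only:

```python
import torch

mix_factor = 0.5                          # illustrative value
orig_latents = torch.randn(1, 4, 64, 64)  # noise carrying the layout
latents = torch.randn(1, 4, 64, 64)       # conditionally generated noise
mixed = mix_factor * latents + (1 - mix_factor) * orig_latents  # lerp between the two
assert mixed.shape == latents.shape
```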
4 changes: 1 addition & 3 deletions examples/community/mixture_canvas.py
@@ -453,9 +453,7 @@ def __call__(
:,
region.latent_row_init : region.latent_row_end,
region.latent_col_init : region.latent_col_end,
-] += (
-noise_pred_region * mask_weights_region
-)
+] += noise_pred_region * mask_weights_region
contributors[
:,
:,
1 change: 1 addition & 0 deletions examples/community/pipeline_prompt2prompt.py
@@ -65,6 +65,7 @@ class Prompt2PromptPipeline(StableDiffusionPipeline):
feature_extractor ([`CLIPFeatureExtractor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
+
_optional_components = ["safety_checker", "feature_extractor"]

@torch.no_grad()
4 changes: 3 additions & 1 deletion examples/community/pipeline_zero1to3.py
@@ -94,6 +94,7 @@ class Zero1to3StableDiffusionPipeline(DiffusionPipeline):
cc_projection ([`CCProjection`]):
Projection layer to project the concated CLIP features and pose embeddings to the original CLIP feature size.
"""
+
_optional_components = ["safety_checker", "feature_extractor"]

def __init__(
@@ -658,7 +659,8 @@ def prepare_img_latents(self, image, batch_size, dtype, device, generator=None,

if isinstance(generator, list):
init_latents = [
-self.vae.encode(image[i : i + 1]).latent_dist.mode(generator[i]) for i in range(batch_size) # sample
+self.vae.encode(image[i : i + 1]).latent_dist.mode(generator[i])
+for i in range(batch_size) # sample
]
init_latents = torch.cat(init_latents, dim=0)
else:
7 changes: 4 additions & 3 deletions examples/community/run_onnx_controlnet.py
@@ -651,9 +651,10 @@ def __call__(
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
mult = num_controlnet
-control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [
-control_guidance_end
-]
+control_guidance_start, control_guidance_end = (
+mult * [control_guidance_start],
+mult * [control_guidance_end],
+)

# 1. Check inputs. Raise error if not correct
self.check_inputs(
7 changes: 4 additions & 3 deletions examples/community/run_tensorrt_controlnet.py
@@ -755,9 +755,10 @@ def __call__(
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
mult = num_controlnet
-control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [
-control_guidance_end
-]
+control_guidance_start, control_guidance_end = (
+mult * [control_guidance_start],
+mult * [control_guidance_end],
+)

# 1. Check inputs. Raise error if not correct
self.check_inputs(
1 change: 1 addition & 0 deletions examples/community/sd_text2img_k_diffusion.py
@@ -68,6 +68,7 @@ class StableDiffusionPipeline(DiffusionPipeline):
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
+
_optional_components = ["safety_checker", "feature_extractor"]

def __init__(
1 change: 1 addition & 0 deletions examples/community/stable_diffusion_ipex.py
@@ -89,6 +89,7 @@ class StableDiffusionIPEXPipeline(DiffusionPipeline, TextualInversionLoaderMixin
feature_extractor ([`CLIPFeatureExtractor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
+
_optional_components = ["safety_checker", "feature_extractor"]

def __init__(
1 change: 1 addition & 0 deletions examples/community/stable_diffusion_mega.py
@@ -50,6 +50,7 @@ class StableDiffusionMegaPipeline(DiffusionPipeline):
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
+
_optional_components = ["safety_checker", "feature_extractor"]

def __init__(
1 change: 1 addition & 0 deletions examples/community/stable_diffusion_repaint.py
@@ -170,6 +170,7 @@ class StableDiffusionRepaintPipeline(DiffusionPipeline, TextualInversionLoaderMi
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
+
_optional_components = ["safety_checker", "feature_extractor"]

def __init__(
@@ -464,9 +464,7 @@ def main(args):
unet = gemini_zero_dpp(unet, args.placement)

# config optimizer for colossalai zero
-optimizer = GeminiAdamOptimizer(
-unet, lr=args.learning_rate, initial_scale=2**5, clipping_norm=args.max_grad_norm
-)
+optimizer = GeminiAdamOptimizer(unet, lr=args.learning_rate, initial_scale=2**5, clipping_norm=args.max_grad_norm)

# load noise_scheduler
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
19 changes: 14 additions & 5 deletions pyproject.toml
@@ -1,10 +1,6 @@
-[tool.black]
-line-length = 119
-target-version = ['py37']
-
[tool.ruff]
# Never enforce `E501` (line length violations).
ignore = ["C901", "E501", "E741", "W605"]
ignore = ["C901", "E501", "E741", "F402", "F823"]
select = ["C", "E", "F", "I", "W"]
line-length = 119

@@ -16,3 +12,16 @@ line-length = 119
[tool.ruff.isort]
lines-after-imports = 2
known-first-party = ["diffusers"]
+
+[tool.ruff.format]
+# Like Black, use double quotes for strings.
+quote-style = "double"
+
+# Like Black, indent with spaces, rather than tabs.
+indent-style = "space"
+
+# Like Black, respect magic trailing commas.
+skip-magic-trailing-comma = false
+
+# Like Black, automatically detect the appropriate line ending.
+line-ending = "auto"
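As a quick illustration of these settings, consider a hypothetical snippet (not from the repository) written as `config = {'steps': 50, 'guidance': 7.5,}`. Because the trailing comma is treated as "magic" and quotes are normalized to double quotes, `ruff format` would rewrite it into the exploded form below:

```python
# Output of `ruff format` under the settings above (the single-quoted, trailing-comma
# input is described in the lead-in; values are illustrative).
config = {
    "steps": 50,
    "guidance": 7.5,
}
print(config)
```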
2 changes: 1 addition & 1 deletion scripts/convert_kakao_brain_unclip_to_diffusers.py
@@ -11,7 +11,7 @@
from diffusers.schedulers.scheduling_unclip import UnCLIPScheduler


"""
r"""
Example - From the diffusers root directory:

Download weights:
20 changes: 0 additions & 20 deletions setup.cfg

This file was deleted.
