
Commit 5b20211

Fix typos (huggingface#12705)
1 parent 4ac2b4a commit 5b20211

8 files changed: +11 -11 lines changed


docs/source/en/modular_diffusers/custom_blocks.md

Lines changed: 2 additions & 2 deletions
@@ -140,7 +140,7 @@ class Florence2ImageAnnotatorBlock(ModularPipelineBlocks):
     type_hint=str,
     required=True,
     default="mask_image",
-    description="""Output type from annotation predictions. Availabe options are
+    description="""Output type from annotation predictions. Available options are
     mask_image:
     -black and white mask image for the given image based on the task type
     mask_overlay:

@@ -256,7 +256,7 @@ class Florence2ImageAnnotatorBlock(ModularPipelineBlocks):
     type_hint=str,
     required=True,
     default="mask_image",
-    description="""Output type from annotation predictions. Availabe options are
+    description="""Output type from annotation predictions. Available options are
     mask_image:
     -black and white mask image for the given image based on the task type
     mask_overlay:
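For orientation, the description edited above sits inside one of the block's input declarations; a minimal sketch of such a declaration, assuming the `InputParam` helper from the Modular Diffusers docs (the parameter name and the second option's wording are illustrative, not taken from the commit):

```python
from diffusers.modular_pipelines import InputParam

# Illustrative reconstruction only: the hunk shows just the
# type_hint / required / default / description fields.
output_type_param = InputParam(
    name="annotation_output_type",  # assumed name, not visible in the hunk
    type_hint=str,
    required=True,
    default="mask_image",
    description=(
        "Output type from annotation predictions. Available options are "
        "mask_image: black and white mask image for the given image based on the task type; "
        "mask_overlay: the mask rendered on top of the original image"  # assumed wording
    ),
)
```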

docs/source/en/modular_diffusers/loop_sequential_pipeline_blocks.md

Lines changed: 1 addition & 1 deletion
@@ -53,7 +53,7 @@ The loop wrapper can pass additional arguments, like current iteration index, to

 A loop block is a [`~modular_pipelines.ModularPipelineBlocks`], but the `__call__` method behaves differently.

-- It recieves the iteration variable from the loop wrapper.
+- It receives the iteration variable from the loop wrapper.
 - It works directly with the [`~modular_pipelines.BlockState`] instead of the [`~modular_pipelines.PipelineState`].
 - It doesn't require retrieving or updating the [`~modular_pipelines.BlockState`].
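To make the bullets in this hunk concrete, here is a minimal, illustrative loop block following that calling convention (class name, signature details, and the per-step body are a sketch, not code from this commit):

```python
from diffusers.modular_pipelines import ModularPipelineBlocks

class IllustrativeLoopStep(ModularPipelineBlocks):
    """Sketch of a block meant to run inside a loop wrapper."""

    def __call__(self, components, block_state, i, t):
        # `i` and `t` are handed in by the loop wrapper (the iteration variables);
        # the block reads and mutates the BlockState directly instead of going
        # through PipelineState retrieval/update calls.
        block_state.latents = block_state.latents  # placeholder for the real per-step update
        return components, block_state
```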

examples/cogvideo/train_cogvideox_image_to_video_lora.py

Lines changed: 2 additions & 2 deletions
@@ -149,13 +149,13 @@ def get_args():
         "--validation_prompt",
         type=str,
         default=None,
-        help="One or more prompt(s) that is used during validation to verify that the model is learning. Multiple validation prompts should be separated by the '--validation_prompt_seperator' string.",
+        help="One or more prompt(s) that is used during validation to verify that the model is learning. Multiple validation prompts should be separated by the '--validation_prompt_separator' string.",
     )
     parser.add_argument(
         "--validation_images",
         type=str,
         default=None,
-        help="One or more image path(s) that is used during validation to verify that the model is learning. Multiple validation paths should be separated by the '--validation_prompt_seperator' string. These should correspond to the order of the validation prompts.",
+        help="One or more image path(s) that is used during validation to verify that the model is learning. Multiple validation paths should be separated by the '--validation_prompt_separator' string. These should correspond to the order of the validation prompts.",
     )
     parser.add_argument(
         "--validation_prompt_separator",

examples/cogvideo/train_cogvideox_lora.py

Lines changed: 1 addition & 1 deletion
@@ -140,7 +140,7 @@ def get_args():
         "--validation_prompt",
         type=str,
         default=None,
-        help="One or more prompt(s) that is used during validation to verify that the model is learning. Multiple validation prompts should be separated by the '--validation_prompt_seperator' string.",
+        help="One or more prompt(s) that is used during validation to verify that the model is learning. Multiple validation prompts should be separated by the '--validation_prompt_separator' string.",
     )
     parser.add_argument(
         "--validation_prompt_separator",

examples/research_projects/onnxruntime/text_to_image/README.md

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@ The `train_text_to_image.py` script shows how to fine-tune stable diffusion mode

 ___Note___:

-___This script is experimental. The script fine-tunes the whole model and often times the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparamters to get the best result on your dataset.___
+___This script is experimental. The script fine-tunes the whole model and often times the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparameters to get the best result on your dataset.___


 ## Running locally with PyTorch

examples/research_projects/sdxl_flax/sdxl_single.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@
 NUM_DEVICES = jax.device_count()

 # 1. Let's start by downloading the model and loading it into our pipeline class
-# Adhering to JAX's functional approach, the model's parameters are returned seperatetely and
+# Adhering to JAX's functional approach, the model's parameters are returned separately and
 # will have to be passed to the pipeline during inference
 pipeline, params = FlaxStableDiffusionXLPipeline.from_pretrained(
     "stabilityai/stable-diffusion-xl-base-1.0", revision="refs/pr/95", split_head_dim=True

src/diffusers/modular_pipelines/qwenimage/before_denoise.py

Lines changed: 2 additions & 2 deletions
@@ -455,7 +455,7 @@ class QwenImageSetTimestepsStep(ModularPipelineBlocks):

     @property
     def description(self) -> str:
-        return "Step that sets the the scheduler's timesteps for text-to-image generation. Should be run after prepare latents step."
+        return "Step that sets the scheduler's timesteps for text-to-image generation. Should be run after prepare latents step."

     @property
     def expected_components(self) -> List[ComponentSpec]:

@@ -579,7 +579,7 @@ class QwenImageSetTimestepsWithStrengthStep(ModularPipelineBlocks):

     @property
     def description(self) -> str:
-        return "Step that sets the the scheduler's timesteps for image-to-image generation, and inpainting. Should be run after prepare latents step."
+        return "Step that sets the scheduler's timesteps for image-to-image generation, and inpainting. Should be run after prepare latents step."

     @property
     def expected_components(self) -> List[ComponentSpec]:

src/diffusers/pipelines/lucy/pipeline_lucy_edit.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
 # limitations under the License.
 #
 # Modifications by Decart AI Team:
-# - Based on pipeline_wan.py, but with supports recieving a condition video appended to the channel dimension.
+# - Based on pipeline_wan.py, but with supports receiving a condition video appended to the channel dimension.

 import html
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
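For orientation, "appended to the channel dimension" means the encoded condition video is concatenated with the noisy latents along `dim=1` before the denoising model sees them; a minimal sketch with made-up shapes and variable names (not the pipeline's actual code):

```python
import torch

# Video latents in diffusers are laid out as (batch, channels, frames, height, width).
latents = torch.randn(1, 16, 21, 60, 104)            # noisy latents being denoised
condition_latents = torch.randn(1, 16, 21, 60, 104)  # encoded condition video, same layout

# "Appended to the channel dimension": concatenate along dim=1, so the model
# receives twice as many input channels.
model_input = torch.cat([latents, condition_latents], dim=1)
print(model_input.shape)  # torch.Size([1, 32, 21, 60, 104])
```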
