
Commit aebf3f4

fixing various typos (#20893)
* fixing various typos
* flaky test_async_checkpoint_plugin
* Apply suggestions from code review
1 parent 64b2b6a commit aebf3f4

File tree

34 files changed, +68 -65 lines changed


.actions/assistant.py

Lines changed: 14 additions & 14 deletions
@@ -154,8 +154,8 @@ def load_readme_description(path_dir: str, homepage: str, version: str) -> str:

     """
     path_readme = os.path.join(path_dir, "README.md")
-    with open(path_readme, encoding="utf-8") as fo:
-        text = fo.read()
+    with open(path_readme, encoding="utf-8") as fopen:
+        text = fopen.read()

     # drop images from readme
     text = text.replace(
@@ -308,17 +308,17 @@ def copy_replace_imports(
         if ext in (".pyc",):
             continue
         # Try to parse everything else
-        with open(fp, encoding="utf-8") as fo:
+        with open(fp, encoding="utf-8") as fopen:
             try:
-                lines = fo.readlines()
+                lines = fopen.readlines()
             except UnicodeDecodeError:
                 # a binary file, skip
                 print(f"Skipped replacing imports for {fp}")
                 continue
         lines = _replace_imports(lines, list(zip(source_imports, target_imports)), lightning_by=lightning_by)
         os.makedirs(os.path.dirname(fp_new), exist_ok=True)
-        with open(fp_new, "w", encoding="utf-8") as fo:
-            fo.writelines(lines)
+        with open(fp_new, "w", encoding="utf-8") as fopen:
+            fopen.writelines(lines)


 def create_mirror_package(source_dir: str, package_mapping: dict[str, str]) -> None:
@@ -370,10 +370,10 @@ def _prune_packages(req_file: str, packages: Sequence[str]) -> None:

     @staticmethod
     def _replace_min(fname: str) -> None:
-        with open(fname, encoding="utf-8") as fo:
-            req = fo.read().replace(">=", "==")
-        with open(fname, "w", encoding="utf-8") as fw:
-            fw.write(req)
+        with open(fname, encoding="utf-8") as fopen:
+            req = fopen.read().replace(">=", "==")
+        with open(fname, "w", encoding="utf-8") as fwrite:
+            fwrite.write(req)

     @staticmethod
     def replace_oldest_ver(requirement_fnames: Sequence[str] = REQUIREMENT_FILES_ALL) -> None:
@@ -471,15 +471,15 @@ def convert_version2nightly(ver_file: str = "src/version.info") -> None:
         """Load the actual version and convert it to the nightly version."""
         from datetime import datetime

-        with open(ver_file) as fo:
-            version = fo.read().strip()
+        with open(ver_file) as fopen:
+            version = fopen.read().strip()
         # parse X.Y.Z version and prune any suffix
         vers = re.match(r"(\d+)\.(\d+)\.(\d+).*", version)
         # create timestamp YYYYMMDD
         timestamp = datetime.now().strftime("%Y%m%d")
         version = f"{'.'.join(vers.groups())}.dev{timestamp}"
-        with open(ver_file, "w") as fo:
-            fo.write(version + os.linesep)
+        with open(ver_file, "w") as fopen:
+            fopen.write(version + os.linesep)

     @staticmethod
     def generate_docker_tags(
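
The `_replace_min` change above belongs to Lightning's oldest-version CI helper: it rewrites every `>=` lower bound in a requirements file to an `==` pin so the minimum supported dependencies get installed. A minimal standalone sketch of the same idea (the function name and default path here are hypothetical, not Lightning API):

# Sketch of the oldest-version pinning idea behind `_replace_min`;
# `pin_to_oldest` and its default path are made up for illustration.
from pathlib import Path

def pin_to_oldest(req_file: str = "requirements.txt") -> None:
    path = Path(req_file)
    # e.g. "torch>=2.1" becomes "torch==2.1"
    path.write_text(path.read_text(encoding="utf-8").replace(">=", "=="), encoding="utf-8")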

dockers/base-cuda/Dockerfile

Lines changed: 5 additions & 4 deletions
@@ -34,11 +34,12 @@ ENV \
     MAKEFLAGS="-j2"

 RUN \
-    apt-get update --fix-missing && apt-get install -y wget && \
-    apt-get update -qq --fix-missing && \
-    NCCL_VER=$(dpkg -s libnccl2 | grep '^Version:' | awk -F ' ' '{print $2}' | awk -F '-' '{print $1}' | grep -ve '^\s*$') && \
     CUDA_VERSION_MM=${CUDA_VERSION%.*} && \
+    apt-get update -qq --fix-missing && apt-get install -y wget && \
+    NCCL_VER=$(dpkg -s libnccl2 | grep '^Version:' | awk -F ' ' '{print $2}' | awk -F '-' '{print $1}' | grep -ve '^\s*$') && \
+    echo "NCCL version found: $NCCL_VER" && \
     TO_INSTALL_NCCL=$(echo -e "$MAX_ALLOWED_NCCL\n$NCCL_VER" | sort -V | head -n1)-1+cuda${CUDA_VERSION_MM} && \
+    echo "NCCL version to install: $TO_INSTALL_NCCL" && \
     apt-get install -y --no-install-recommends --allow-downgrades --allow-change-held-packages \
         build-essential \
         pkg-config \
@@ -96,7 +97,7 @@ RUN \
         --extra-index-url="https://download.pytorch.org/whl/test/cu${CUDA_VERSION_MM//'.'/''}/"

 RUN \
-    # Show what we have
+    # Show what we have \
     pip --version && \
     pip list && \
     python -c "import sys; ver = sys.version_info ; assert f'{ver.major}.{ver.minor}' == '$PYTHON_VERSION', ver" && \
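
For context on the reworked RUN step: `echo -e "$MAX_ALLOWED_NCCL\n$NCCL_VER" | sort -V | head -n1` does a version-aware sort and keeps the smaller of the allowed ceiling and the installed NCCL version. A rough Python equivalent of that selection (the version strings below are invented):

# Version-aware minimum, mirroring `sort -V | head -n1`; example values are made up.
from packaging.version import Version

max_allowed, installed = "2.22.3", "2.23.4"
to_install = min(max_allowed, installed, key=Version)
print(to_install)  # -> 2.22.3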

examples/fabric/build_your_own_trainer/trainer.py

Lines changed: 1 addition & 1 deletion
@@ -418,7 +418,7 @@ def load(self, state: Optional[Mapping], path: str) -> None:
         """Loads a checkpoint from a given file into state.

         Args:
-            state: a mapping contaning model, optimizer and lr scheduler
+            state: a mapping containing model, optimizer and lr scheduler
             path: the path to load the checkpoint from

         """

examples/fabric/meta_learning/train_fabric.py

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ def accuracy(predictions, targets):
 def fast_adapt(batch, learner, loss, adaptation_steps, shots, ways):
     data, labels = batch

-    # Separate data into adaptation/evalutation sets
+    # Separate data into adaptation/evaluation sets
     adaptation_indices = torch.zeros(data.size(0), dtype=bool)
     adaptation_indices[torch.arange(shots * ways) * 2] = True
     evaluation_indices = ~adaptation_indices

examples/fabric/meta_learning/train_torch.py

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ def fast_adapt(batch, learner, loss, adaptation_steps, shots, ways, device):
     data, labels = batch
     data, labels = data.to(device), labels.to(device)

-    # Separate data into adaptation/evalutation sets
+    # Separate data into adaptation/evaluation sets
     adaptation_indices = torch.zeros(data.size(0), dtype=bool)
     adaptation_indices[torch.arange(shots * ways) * 2] = True
     evaluation_indices = ~adaptation_indices

examples/pytorch/domain_templates/reinforce_learn_ppo.py

Lines changed: 1 addition & 1 deletion
@@ -353,7 +353,7 @@ def generate_trajectory_samples(self) -> tuple[list[torch.Tensor], list[torch.Te
         # logging
         self.avg_reward = sum(self.epoch_rewards) / self.steps_per_epoch

-        # if epoch ended abruptly, exlude last cut-short episode to prevent stats skewness
+        # if epoch ended abruptly, exclude last cut-short episode to prevent stats skewness
         epoch_rewards = self.epoch_rewards
         if not done:
             epoch_rewards = epoch_rewards[:-1]

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@ blank = true

 [tool.codespell]
 # Todo: enable also python files in a next step
-skip = '*.py'
+#skip = '*.py'
 quiet-level = 3
 # comma separated list of words; waiting for:
 # https://github.com/codespell-project/codespell/issues/2839#issuecomment-1731601603

setup.py

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@
 - for `pytorch-lightning` use `export PACKAGE_NAME=pytorch ; pip install .`
 - for `lightning-fabric` use `export PACKAGE_NAME=fabric ; pip install .`

-3. Building packages as sdist or binary wheel and installing or publish to PyPI afterwords you use command
+3. Building packages as sdist or binary wheel and installing or publish to PyPI afterwards you use command
    `python setup.py sdist` or `python setup.py bdist_wheel` accordingly.
    In case you want to build just a particular package you want to set an environment variable:
    `PACKAGE_NAME=lightning|pytorch|fabric python setup.py sdist|bdist_wheel`

src/lightning/__version__.py

Lines changed: 2 additions & 2 deletions
@@ -5,5 +5,5 @@
 if not os.path.exists(_VERSION_PATH):
     # relevant for `bdist_wheel`
     _VERSION_PATH = os.path.join(_PACKAGE_ROOT, "version.info")
-with open(_VERSION_PATH, encoding="utf-8") as fo:
-    version = fo.readlines()[0].strip()
+with open(_VERSION_PATH, encoding="utf-8") as fopen:
+    version = fopen.readlines()[0].strip()

src/lightning/fabric/connector.py

Lines changed: 1 addition & 1 deletion
@@ -83,7 +83,7 @@ class _Connector:
         1. strategy class
         2. strategy str registered with STRATEGY_REGISTRY
         3. strategy str in _strategy_type enum which listed in each strategy as
-           backend (registed these too, and _strategy_type could be deprecated)
+           backend (registered these too, and _strategy_type could be deprecated)

     C. plugins flag could be:
         1. precision class (should be removed, and precision flag should allow user pass classes)
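
As a usage illustration of options A.1 and A.2 from that docstring, a strategy can reach the connector either as a registered string or as a class instance (a sketch, not part of this diff):

# Sketch: two of the strategy flag forms the _Connector docstring lists.
from lightning.fabric import Fabric
from lightning.fabric.strategies import DDPStrategy

fabric = Fabric(strategy="ddp")          # registered strategy string
fabric = Fabric(strategy=DDPStrategy())  # strategy class instance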

src/lightning/fabric/fabric.py

Lines changed: 1 addition & 1 deletion
@@ -327,7 +327,7 @@ def setup_optimizers(self, *optimizers: Optimizer) -> Union[_FabricOptimizer, tu
         ``.setup(model, optimizer, ...)`` instead to jointly set them up.

         Args:
-            *optimizers: One or more optmizers to set up.
+            *optimizers: One or more optimizers to set up.

         Returns:
             The wrapped optimizer(s).

src/lightning/fabric/strategies/parallel.py

Lines changed: 1 addition & 1 deletion
@@ -87,7 +87,7 @@ def all_gather(self, tensor: Tensor, group: Optional[Any] = None, sync_grads: bo

     @override
     def reduce_boolean_decision(self, decision: bool, all: bool = True) -> bool:
-        """Reduces a boolean decision over distributed processes. By default is analagous to ``all`` from the standard
+        """Reduces a boolean decision over distributed processes. By default is analogous to ``all`` from the standard
         library, returning ``True`` only if all input decisions evaluate to ``True``. If ``all`` is set to ``False``,
         it behaves like ``any`` instead.
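
The corrected docstring describes semantics matching Python's built-in `all`/`any` applied across processes; a plain-Python sketch of that contract (not the distributed implementation):

# Contract of reduce_boolean_decision, shown with builtins over a made-up
# list of per-process decisions instead of an actual collective reduction.
decisions = [True, True, False]
assert all(decisions) is False  # default: every process must agree
assert any(decisions) is True   # all=False: a single True suffices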

src/lightning/pytorch/callbacks/progress/rich_progress.py

Lines changed: 1 addition & 1 deletion
@@ -430,7 +430,7 @@ def on_validation_batch_start(
         if self.val_progress_bar_id is not None:
             self.progress.update(self.val_progress_bar_id, advance=0, visible=False)

-        # TODO: remove old tasks when new onces are created
+        # TODO: remove old tasks when new once they are created
         self.val_progress_bar_id = self._add_task(
             self.total_val_batches_current_dataloader,
             self.validation_description,

src/lightning/pytorch/core/module.py

Lines changed: 1 addition & 1 deletion
@@ -262,7 +262,7 @@ def current_epoch(self) -> int:
     def global_step(self) -> int:
         """Total training batches seen across all epochs.

-        If no Trainer is attached, this propery is 0.
+        If no Trainer is attached, this property is 0.

         """
         return self.trainer.global_step if self._trainer else 0

src/lightning/pytorch/demos/transformer.py

Lines changed: 1 addition & 1 deletion
@@ -84,7 +84,7 @@ def __init__(self, dim: int, dropout: float = 0.1, max_len: int = 5000) -> None:
     def forward(self, x: Tensor) -> Tensor:
         if self.pe is None:
             # 1) can't use buffer, see https://github.com/pytorch/pytorch/issues/68407
-            # 2) can't use parameter becauses pe gets sliced and DDP requires all params to participate in forward
+            # 2) can't use parameter because pe gets sliced and DDP requires all params to participate in forward
             # TODO: Could make this a `nn.Parameter` with `requires_grad=False`
             self.pe = self._init_pos_encoding(device=x.device)

src/lightning/pytorch/strategies/parallel.py

Lines changed: 1 addition & 1 deletion
@@ -93,7 +93,7 @@ def all_gather(self, tensor: Tensor, group: Optional[Any] = None, sync_grads: bo

     @override
     def reduce_boolean_decision(self, decision: bool, all: bool = True) -> bool:
-        """Reduces a boolean decision over distributed processes. By default is analagous to ``all`` from the standard
+        """Reduces a boolean decision over distributed processes. By default is analogous to ``all`` from the standard
         library, returning ``True`` only if all input decisions evaluate to ``True``. If ``all`` is set to ``False``,
         it behaves like ``any`` instead.

src/lightning/pytorch/trainer/connectors/accelerator_connector.py

Lines changed: 1 addition & 1 deletion
@@ -467,7 +467,7 @@ def _check_strategy_and_fallback(self) -> None:
         if strategy_flag in _DDP_FORK_ALIASES and "fork" not in torch.multiprocessing.get_all_start_methods():
             raise ValueError(
                 f"You selected `Trainer(strategy='{strategy_flag}')` but process forking is not supported on this"
-                f" platform. We recommed `Trainer(strategy='ddp_spawn')` instead."
+                f" platform. We recommend `Trainer(strategy='ddp_spawn')` instead."
             )
         if strategy_flag:
             self._strategy_flag = strategy_flag
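
The guard around that error message uses `torch.multiprocessing.get_all_start_methods()`; the same probe shows whether fork-based strategies such as `ddp_fork` can work on a given platform (a sketch, not Lightning code):

# Probe fork support the way the connector does; "fork" is absent on Windows.
import torch.multiprocessing as mp

print("fork" in mp.get_all_start_methods())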

src/lightning_fabric/__version__.py

Lines changed: 2 additions & 2 deletions
@@ -5,5 +5,5 @@
 if not os.path.exists(_VERSION_PATH):
     # relevant for `bdist_wheel`
     _VERSION_PATH = os.path.join(_PACKAGE_ROOT, "version.info")
-with open(_VERSION_PATH, encoding="utf-8") as fo:
-    version = fo.readlines()[0].strip()
+with open(_VERSION_PATH, encoding="utf-8") as fopen:
+    version = fopen.readlines()[0].strip()

src/pytorch_lightning/__version__.py

Lines changed: 2 additions & 2 deletions
@@ -5,5 +5,5 @@
 if not os.path.exists(_VERSION_PATH):
     # relevant for `bdist_wheel`
     _VERSION_PATH = os.path.join(_PACKAGE_ROOT, "version.info")
-with open(_VERSION_PATH, encoding="utf-8") as fo:
-    version = fo.readlines()[0].strip()
+with open(_VERSION_PATH, encoding="utf-8") as fopen:
+    version = fopen.readlines()[0].strip()

tests/parity_fabric/test_parity_ddp.py

Lines changed: 1 addition & 1 deletion
@@ -126,7 +126,7 @@ def train_fabric_ddp(fabric):
 def run_parity_test(accelerator: str = "cpu", devices: int = 2, tolerance: float = 0.02):
     cuda_reset()

-    # Launch processes with Fabric and re-use them for the PyTorch training for convenience
+    # Launch processes with Fabric and reuse them for the PyTorch training for convenience
     fabric = Fabric(accelerator=accelerator, strategy="ddp", devices=devices)
     fabric.launch()

tests/tests_fabric/plugins/environments/test_slurm.py

Lines changed: 1 addition & 1 deletion
@@ -174,7 +174,7 @@ def test_validate_user_settings():
     with pytest.raises(ValueError, match="the number of nodes configured in SLURM .* does not match"):
         env.validate_settings(num_devices=4, num_nodes=1)

-    # in interactive mode, validation is skipped becauses processes get launched by Fabric/Trainer, not SLURM
+    # in interactive mode, validation is skipped because processes get launched by Fabric/Trainer, not SLURM
     with mock.patch(
         "lightning.fabric.plugins.environments.slurm.SLURMEnvironment.job_name", return_value="interactive"
     ):

tests/tests_fabric/strategies/test_ddp_integration.py

Lines changed: 1 addition & 1 deletion
@@ -85,7 +85,7 @@ def test_reapply_compile():
     fabric.launch()

     model = BoringModel()
-    # currently (PyTorch 2.6) using ruduce-overhead here casues a RuntimeError:
+    # currently (PyTorch 2.6) using reduce overhead here causes a RuntimeError:
     # Error: accessing tensor output of CUDAGraphs that has been overwritten by a subsequent run.
     compile_kwargs = {"mode": "reduce-overhead"} if _TORCH_LESS_EQUAL_2_6 else {}
     compiled_model = torch.compile(model, **compile_kwargs)

tests/tests_fabric/strategies/test_fsdp_integration.py

Lines changed: 1 addition & 1 deletion
@@ -412,7 +412,7 @@ def test_reapply_compile():
     fabric.launch()

     model = BoringModel()
-    # currently (PyTorch 2.6) using ruduce-overhead here casues a RuntimeError:
+    # currently (PyTorch 2.6) using ruduce-overhead here causes a RuntimeError:
     # Error: accessing tensor output of CUDAGraphs that has been overwritten by a subsequent run.
     compile_kwargs = {"mode": "reduce-overhead"} if _TORCH_LESS_EQUAL_2_6 else {}
     compiled_model = torch.compile(model, **compile_kwargs)

tests/tests_fabric/test_connector.py

Lines changed: 6 additions & 6 deletions
@@ -194,23 +194,23 @@ def name() -> str:
     class Prec(Precision):
         pass

-    class Strat(SingleDeviceStrategy):
+    class TestStrategy(SingleDeviceStrategy):
         pass

-    strategy = Strat(device=torch.device("cpu"), accelerator=Accel(), precision=Prec())
+    strategy = TestStrategy(device=torch.device("cpu"), accelerator=Accel(), precision=Prec())
     connector = _Connector(strategy=strategy, devices=2)
     assert isinstance(connector.accelerator, Accel)
-    assert isinstance(connector.strategy, Strat)
+    assert isinstance(connector.strategy, TestStrategy)
     assert isinstance(connector.precision, Prec)
     assert connector.strategy is strategy

-    class Strat(DDPStrategy):
+    class TestStrategy(DDPStrategy):
         pass

-    strategy = Strat(accelerator=Accel(), precision=Prec())
+    strategy = TestStrategy(accelerator=Accel(), precision=Prec())
     connector = _Connector(strategy=strategy, devices=2)
     assert isinstance(connector.accelerator, Accel)
-    assert isinstance(connector.strategy, Strat)
+    assert isinstance(connector.strategy, TestStrategy)
     assert isinstance(connector.precision, Prec)
     assert connector.strategy is strategy

tests/tests_pytorch/callbacks/test_throughput_monitor.py

Lines changed: 1 addition & 1 deletion
@@ -303,7 +303,7 @@ def test_throughput_monitor_eval(tmp_path, fn):
     assert logger_mock.log_metrics.mock_calls == [
         call(metrics={**expected, f"{fn}|batches": 3, f"{fn}|samples": 9}, step=3),
         call(metrics={**expected, f"{fn}|batches": 6, f"{fn}|samples": 18}, step=6),
-        # the step doesnt repeat
+        # the step doesn't repeat
         call(metrics={**expected, f"{fn}|batches": 9, f"{fn}|samples": 27}, step=9),
         call(metrics={**expected, f"{fn}|batches": 12, f"{fn}|samples": 36}, step=12),
     ]

tests/tests_pytorch/checkpointing/test_model_checkpoint.py

Lines changed: 2 additions & 2 deletions
@@ -326,8 +326,8 @@ def test_model_checkpoint_to_yaml(tmp_path, save_top_k: int):

     path_yaml = tmp_path / "best_k_models.yaml"
     checkpoint.to_yaml(path_yaml)
-    with open(path_yaml) as fo:
-        d = yaml.full_load(fo)
+    with open(path_yaml) as fopen:
+        d = yaml.full_load(fopen)
     best_k = dict(checkpoint.best_k_models.items())
     assert d == best_k

tests/tests_pytorch/core/test_lightning_optimizer.py

Lines changed: 1 addition & 1 deletion
@@ -45,7 +45,7 @@ def configure_optimizers(self):


 def test_init_optimizers_resets_lightning_optimizers(tmp_path):
-    """Test that the Trainer resets the `lightning_optimizers` list everytime new optimizers get initialized."""
+    """Test that the Trainer resets the `lightning_optimizers` list every time new optimizers get initialized."""

     def compare_optimizers():
         assert trainer.strategy._lightning_optimizers[0].optimizer is trainer.optimizers[0]

tests/tests_pytorch/models/test_cpu.py

Lines changed: 1 addition & 1 deletion
@@ -49,7 +49,7 @@ def test_cpu_slurm_save_load(_, tmp_path):
     trainer.fit(model)
     real_global_step = trainer.global_step

-    # traning complete
+    # training complete
     assert trainer.state.finished, "cpu model failed to complete"

     # predict with trained model before saving

tests/tests_pytorch/models/test_restore.py

Lines changed: 2 additions & 2 deletions
@@ -547,7 +547,7 @@ def test_strict_model_load_more_params(monkeypatch, tmp_path, tmpdir_server, url
     )
     trainer.fit(model)

-    # traning complete
+    # training complete
    assert trainer.state.finished, f"Training failed with {trainer.state}"

     # save model
@@ -587,7 +587,7 @@ def test_strict_model_load_less_params(monkeypatch, tmp_path, tmpdir_server, url
     )
     trainer.fit(model)

-    # traning complete
+    # training complete
     assert trainer.state.finished, f"Training failed with {trainer.state}"

     # save model

tests/tests_pytorch/plugins/test_checkpoint_io_plugin.py

Lines changed: 2 additions & 0 deletions
@@ -16,6 +16,7 @@
 from typing import Any, Optional
 from unittest.mock import MagicMock, Mock

+import pytest
 import torch

 from lightning.fabric.plugins import CheckpointIO, TorchCheckpointIO
@@ -97,6 +98,7 @@ def test_checkpoint_plugin_called(tmp_path):
     checkpoint_plugin.load_checkpoint.assert_called_with(str(tmp_path / "last-v1.ckpt"))


+@pytest.mark.flaky(reruns=3)
 def test_async_checkpoint_plugin(tmp_path):
     """Ensure that the custom checkpoint IO plugin and torch checkpoint IO plugin is called when async saving and
     loading."""
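
The new `@pytest.mark.flaky(reruns=3)` marker comes from the pytest-rerunfailures plugin: the test is rerun up to three times before pytest reports it as failed. A minimal self-contained example of the marker (assuming pytest-rerunfailures is installed; the test body is invented):

# Requires the pytest-rerunfailures plugin for the `flaky` marker.
import random

import pytest

@pytest.mark.flaky(reruns=3)
def test_sometimes_passes():
    # deliberately nondeterministic, so reruns give it several chances
    assert random.random() < 0.5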

tests/tests_pytorch/strategies/launchers/test_multiprocessing.py

Lines changed: 1 addition & 1 deletion
@@ -230,7 +230,7 @@ def test_fit_twice_raises(mps_count_0):
         barebones=True,
     )
     trainer.fit(model)
-    trainer.test(model)  # make sure testing in between doesnt impact the result
+    trainer.test(model)  # make sure testing in between doesn't impact the result
     trainer.fit_loop.max_epochs += 1
     with pytest.raises(NotImplementedError, match=r"twice.*is not supported"):
         trainer.fit(model)
