Skip to content

Commit

Permalink
Fix typos under torch/utils directory (pytorch#97516)
Browse files Browse the repository at this point in the history
This PR fixes typos in comments and messages of `.py` files under the `torch/utils` directory

Pull Request resolved: pytorch#97516
Approved by: https://github.com/ezyang
  • Loading branch information
kiszk authored and pytorchmergebot committed Mar 24, 2023
1 parent d305d4a commit 622a11d
Show file tree
Hide file tree
Showing 7 changed files with 13 additions and 13 deletions.
2 changes: 1 addition & 1 deletion torch/utils/benchmark/utils/fuzzer.py
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,7 @@ def __init__(
min_elements:
The minimum number of parameters that this Tensor must have for a
set of parameters to be valid. (Otherwise they are resampled.)
max_elemnts:
max_elements:
Like `min_elements`, but setting an upper bound.
max_allocation_bytes:
Like `max_elements`, but for the size of Tensor that must be
Expand Down
2 changes: 1 addition & 1 deletion torch/utils/checkpoint.py
Original file line number Diff line number Diff line change
Expand Up @@ -413,7 +413,7 @@ def forward(input):
# with retain_graph=True, we would store recomputed variables as the values of a
# WeakKeyDictionary and pack strong references to the keys, so that as we
# backward, those packed keys would be cleared as long as retain_graph=False.
# Clearing the packed key clears the corresonding entry in the WKD.
# Clearing the packed key clears the corresponding entry in the WKD.
#
# If we wish recomputed variables to be immediately cleared as we unpack them in
# the retain_graph=True case, we cannot rely on the packed keys to be cleared by
Expand Down
4 changes: 2 additions & 2 deletions torch/utils/cpp_extension.py
Original file line number Diff line number Diff line change
Expand Up @@ -299,7 +299,7 @@ def check_compiler_ok_for_platform(compiler: str) -> bool:
env['LC_ALL'] = 'C' # Don't localize output
version_string = subprocess.check_output([compiler, '-v'], stderr=subprocess.STDOUT, env=env).decode(*SUBPROCESS_DECODE_ARGS)
if IS_LINUX:
# Check for 'gcc' or 'g++' for sccache warpper
# Check for 'gcc' or 'g++' for sccache wrapper
pattern = re.compile("^COLLECT_GCC=(.*)$", re.MULTILINE)
results = re.findall(pattern, version_string)
if len(results) != 1:
Expand Down Expand Up @@ -1906,7 +1906,7 @@ def _run_ninja_build(build_directory: str, verbose: bool, error_prefix: str) ->
_, error, _ = sys.exc_info()
# error.output contains the stdout and stderr of the build attempt.
message = error_prefix
# `error` is a CalledProcessError (which has an `ouput`) attribute, but
# `error` is a CalledProcessError (which has an `output`) attribute, but
# mypy thinks it's Optional[BaseException] and doesn't narrow
if hasattr(error, 'output') and error.output: # type: ignore[union-attr]
message += f": {error.output.decode(*SUBPROCESS_DECODE_ARGS)}" # type: ignore[union-attr]
Expand Down
8 changes: 4 additions & 4 deletions torch/utils/data/dataloader.py
Original file line number Diff line number Diff line change
Expand Up @@ -428,7 +428,7 @@ def __setattr__(self, attr, val):
# since '_BaseDataLoaderIter' references 'DataLoader'.
def __iter__(self) -> '_BaseDataLoaderIter':
# When using a single worker the returned iterator should be
# created everytime to avoid reseting its state
# created everytime to avoid resetting its state
# However, in the case of a multiple workers iterator
# the iterator is only created once in the lifetime of the
# DataLoader object so that workers can be reused
Expand Down Expand Up @@ -545,7 +545,7 @@ def _create_warning_msg(num_worker_suggest, num_worker_created, cpuset_checked):
pass
if max_num_worker_suggest is None:
# os.cpu_count() could return Optional[int]
# get cpu count first and check None in order to satify mypy check
# get cpu count first and check None in order to satisfy mypy check
cpu_count = os.cpu_count()
if cpu_count is not None:
max_num_worker_suggest = cpu_count
Expand Down Expand Up @@ -733,7 +733,7 @@ class _MultiProcessingDataLoaderIter(_BaseDataLoaderIter):
#
# First of all, `__del__` is **not** guaranteed to be called when
# interpreter exits. Even if it is called, by the time it executes,
# many Python core library resources may alreay be freed, and even
# many Python core library resources may already be freed, and even
# simple things like acquiring an internal lock of a queue may hang.
# Therefore, in this case, we actually need to prevent `__del__` from
# being executed, and rely on the automatic termination of daemonic
Expand Down Expand Up @@ -978,7 +978,7 @@ class _MultiProcessingDataLoaderIter(_BaseDataLoaderIter):
# NOTE: (c) is better placed after (b) because it may leave corrupted
# data in `worker_result_queue`, which `pin_memory_thread`
# reads from, in which case the `pin_memory_thread` can only
# happen at timeing out, which is slow. Nonetheless, same thing
# happen at timing out, which is slow. Nonetheless, same thing
# happens if a worker is killed by signal at unfortunate times,
# but in other cases, we are better off having a non-corrupted
# `worker_result_queue` for `pin_memory_thread`.
Expand Down
2 changes: 1 addition & 1 deletion torch/utils/data/datapipes/dataframe/dataframes.py
Original file line number Diff line number Diff line change
Expand Up @@ -217,7 +217,7 @@ def __str__(self):

def execute(self):

# TODO: VitalyFedyunin execute kwargs and maybe nestted structures
# TODO: VitalyFedyunin execute kwargs and maybe nested structures
executed_args = []
for arg in self.kwargs['args']:
if isinstance(arg, Capture):
Expand Down
6 changes: 3 additions & 3 deletions torch/utils/data/datapipes/iter/combining.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ def __new__(
copy: Optional[Literal["shallow", "deep"]] = None
):
if num_instances < 1:
raise ValueError(f"Expected `num_instaces` larger than 0, but {num_instances} is found")
raise ValueError(f"Expected `num_instances` larger than 0, but {num_instances} is found")
if num_instances == 1:
return datapipe
container = _ForkerIterDataPipe(datapipe, num_instances, buffer_size, copy)
Expand Down Expand Up @@ -191,7 +191,7 @@ def get_next_element_by_instance(self, instance_id: int):
if self.buffer and self.child_pointers[instance_id] <= self.leading_ptr:
idx = self.child_pointers[instance_id] - self.slowest_ptr - 1
return_val = self.buffer[idx]
else: # Retreive one element from main datapipe
else: # Retrieve one element from main datapipe
self.leading_ptr = self.child_pointers[instance_id]
try:
return_val = next(self._datapipe_iterator) # type: ignore[arg-type]
Expand Down Expand Up @@ -385,7 +385,7 @@ class DemultiplexerIterDataPipe(IterDataPipe):
def __new__(cls, datapipe: IterDataPipe, num_instances: int,
classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool = False, buffer_size: int = 1000):
if num_instances < 1:
raise ValueError(f"Expected `num_instaces` larger than 0, but {num_instances} is found")
raise ValueError(f"Expected `num_instances` larger than 0, but {num_instances} is found")

_check_unpickable_fn(classifier_fn)

Expand Down
2 changes: 1 addition & 1 deletion torch/utils/file_baton.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ def __init__(self, lock_file_path, wait_seconds=0.1):
Args:
lock_file_path: The path to the file used for locking.
wait_seconds: The seconds to periorically sleep (spin) when
wait_seconds: The seconds to periodically sleep (spin) when
calling ``wait()``.
'''
self.lock_file_path = lock_file_path
Expand Down

0 comments on commit 622a11d

Please sign in to comment.