diff --git a/.flake8 b/.flake8
index d93dfb73b7e9b1..d8e2797cb2c3d4 100644
--- a/.flake8
+++ b/.flake8
@@ -13,6 +13,7 @@ ignore =
     # these ignores are from flake8-comprehensions; please fix!
     C400,C401,C402,C403,C404,C405,C407,C411,C413,C414,C415
 per-file-ignores = __init__.py: F401 torch/utils/cpp_extension.py: B950
+optional-ascii-coding = True
 exclude =
     ./.git,
     ./build_code_analyzer,
diff --git a/requirements-flake8.txt b/requirements-flake8.txt
index 1e2ba252556f3b..4f521b30c48106 100644
--- a/requirements-flake8.txt
+++ b/requirements-flake8.txt
@@ -2,6 +2,7 @@ flake8==3.8.2
 flake8-bugbear==20.1.4
 flake8-comprehensions==3.3.0
 flake8-executable==2.0.4
+git+https://github.com/malfet/flake8-coding.git
 flake8-pyi==20.5.0
 mccabe
 pycodestyle==2.6.0
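The `optional-ascii-coding` option is provided by the malfet/flake8-coding fork added to requirements-flake8.txt above. Judging from the hunks that follow, it appears to require a PEP 263 coding declaration only in files that actually contain non-ASCII characters; that is our reading of the option, not documented behavior. A minimal Python sketch of that rule:

    import re
    import sys

    # PEP 263: a coding declaration must appear on line 1 or 2 of the file.
    CODING_RE = re.compile(r"^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")

    def missing_coding_comment(path: str) -> bool:
        """True if `path` contains non-ASCII text but declares no encoding."""
        with open(path, encoding="utf-8") as f:
            lines = f.read().splitlines()
        has_non_ascii = any(not line.isascii() for line in lines)
        has_declaration = any(CODING_RE.match(line) for line in lines[:2])
        return has_non_ascii and not has_declaration

    if __name__ == "__main__":
        for path in sys.argv[1:]:
            if missing_coding_comment(path):
                print(f"{path}: non-ASCII text without a '# -*- coding: utf-8 -*-' header")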
diff --git a/scripts/release_notes/commitlist.py b/scripts/release_notes/commitlist.py
index cd92af6e36f01d..d78efff2b0daca 100644
--- a/scripts/release_notes/commitlist.py
+++ b/scripts/release_notes/commitlist.py
@@ -150,7 +150,7 @@ def get_markdown_header(category):
 * **Please cleanup, and format commit titles to be readable by the general pytorch user.** [Detailed intructions here](https://fb.quip.com/OCRoAbEvrRD9#HdaACARZZvo)
 * Please sort commits into the following categories (you should not rename the categories!), I tried to pre-sort these to ease your work, feel free to move commits around if the current categorization is not good.
 * Please drop any commits that are not user-facing.
-* If anything is from another domain, leave it in the UNTOPICED section at the end and I’ll come and take care of it.
+* If anything is from another domain, leave it in the UNTOPICED section at the end and I'll come and take care of it.
 
 The categories below are as follows:
diff --git a/test/package/test_misc.py b/test/package/test_misc.py
index d97adf162e0f96..13eb8ac8cdee1a 100644
--- a/test/package/test_misc.py
+++ b/test/package/test_misc.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import inspect
 from io import BytesIO
 from sys import version_info
diff --git a/test/package/test_resources.py b/test/package/test_resources.py
index 8aca7d2d216392..eb33deed991b7c 100644
--- a/test/package/test_resources.py
+++ b/test/package/test_resources.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 from io import BytesIO
 from sys import version_info
 from textwrap import dedent
diff --git a/test/test_fx.py b/test/test_fx.py
index bd871cce467388..106123c1a25ced 100644
--- a/test/test_fx.py
+++ b/test/test_fx.py
@@ -1860,7 +1860,7 @@ def forward(self, x):
             traced(5)
 
         self.assertIn("Call using an FX-traced Module, line 4 of the "
-                      "traced Module’s generated forward function:",
+                      "traced Module's generated forward function:",
                       captured[0])
 
     def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
@@ -1882,7 +1882,7 @@ def forward(self, x):
             captured = traceback.format_exc()
 
         self.assertNotIn("Call using an FX-traced Module, line 4 of the"
-                         " traced Module’s generated forward function:",
+                         " traced Module's generated forward function:",
                          captured)
 
     def test_ast_rewriter_rewrites_assert(self):
diff --git a/test/test_jit_fuser.py b/test/test_jit_fuser.py
index 555dc3ab2c046d..cfccbf9c1dd216 100644
--- a/test/test_jit_fuser.py
+++ b/test/test_jit_fuser.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import unittest
 import os
 import sys
diff --git a/test/test_linalg.py b/test/test_linalg.py
index 3dc0be21a12a64..3cc766c3a7b0ab 100644
--- a/test/test_linalg.py
+++ b/test/test_linalg.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import torch
 import numpy as np
 
diff --git a/test/test_torch.py b/test/test_torch.py
index 5c66c58a69d756..558ee8c014d6f8 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import torch
 import numpy as np
 
diff --git a/tools/print_test_stats.py b/tools/print_test_stats.py
index ba09d47250d91d..a3d343d153af73 100755
--- a/tools/print_test_stats.py
+++ b/tools/print_test_stats.py
@@ -1,4 +1,5 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
 
 import bz2
 import datetime
diff --git a/tools/test/test_stats.py b/tools/test/test_stats.py
index aafa3ba2b96179..b85c38763cce08 100644
--- a/tools/test/test_stats.py
+++ b/tools/test/test_stats.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import unittest
 
 from tools import print_test_stats
diff --git a/torch/_torch_docs.py b/torch/_torch_docs.py
index 330634fa4f5bd4..eee49b8d917c67 100644
--- a/torch/_torch_docs.py
+++ b/torch/_torch_docs.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """Adds docstrings to functions defined in the torch._C"""
 
 import re
diff --git a/torch/distributed/elastic/metrics/__init__.py b/torch/distributed/elastic/metrics/__init__.py
index 4f5a037829f8ab..f72f75554d82d3 100644
--- a/torch/distributed/elastic/metrics/__init__.py
+++ b/torch/distributed/elastic/metrics/__init__.py
@@ -36,7 +36,7 @@
 
 **Publish Metrics**:
 
-Using torchelastic’s metrics API is similar to using python’s logging
+Using torchelastic's metrics API is similar to using python's logging
 framework. You first have to configure a metrics handler before
 trying to add metric data.
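The docstring hunk above describes the torchelastic metrics API by analogy with `logging`. A minimal usage sketch; the names (`configure`, `ConsoleMetricHandler`, `put_metric`) are our recollection of `torch.distributed.elastic.metrics` and should be verified against the installed version:

    import time

    import torch.distributed.elastic.metrics as metrics

    # As with logging: install a handler first, optionally scoped to a group.
    metrics.configure(metrics.ConsoleMetricHandler(), group="my_app")

    def calculate():
        time.sleep(0.1)  # stand-in for real work

    start = time.time()
    calculate()
    # ...then publish data points against that group.
    metrics.put_metric("calculate_latency_ms", int((time.time() - start) * 1000), "my_app")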
diff --git a/torch/distributed/elastic/rendezvous/__init__.py b/torch/distributed/elastic/rendezvous/__init__.py
index 6c549359ab79c4..69e8e1e3a1a6ad 100644
--- a/torch/distributed/elastic/rendezvous/__init__.py
+++ b/torch/distributed/elastic/rendezvous/__init__.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env/python3
+# -*- coding: utf-8 -*-
 
 # Copyright (c) Facebook, Inc. and its affiliates.
 # All rights reserved.
diff --git a/torch/distributed/elastic/rendezvous/api.py b/torch/distributed/elastic/rendezvous/api.py
index 363fe1b3ef807c..49be9c5bfdd725 100644
--- a/torch/distributed/elastic/rendezvous/api.py
+++ b/torch/distributed/elastic/rendezvous/api.py
@@ -95,7 +95,7 @@ def set_closed(self):
     def num_nodes_waiting(self) -> int:
         """
         Returns number of workers who *arrived late* at
-        the rendezvous barrier, hence weren’t included in the current worker
+        the rendezvous barrier, hence weren't included in the current worker
         group.
 
         Callers should periodically call this method to check whether
diff --git a/torch/distributed/elastic/rendezvous/etcd_rendezvous.py b/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
index 960bb88bd9d352..2acf52abb8008d 100644
--- a/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
+++ b/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+# -*- coding: utf-8 -*-
 
 # Copyright (c) Facebook, Inc. and its affiliates.
 # All rights reserved.
diff --git a/torch/distributed/nn/api/remote_module.py b/torch/distributed/nn/api/remote_module.py
index 9b435577cbfecd..88e0dd721bef78 100644
--- a/torch/distributed/nn/api/remote_module.py
+++ b/torch/distributed/nn/api/remote_module.py
@@ -101,7 +101,7 @@ def __init__(
             ``def forward_async(input: Tensor) -> Future[Tensor]:``.
 
         Args:
-            remote_device (str): Device on the destination worker where we‘d like to place this module.
+            remote_device (str): Device on the destination worker where we'd like to place this module.
                 The format should be "<workername>/<device>", where the device field can be parsed as torch.device type.
                 E.g., "trainer0/cpu", "trainer0", "ps0/cuda:0".
                 In addition, the device field can be optional and the default value is "cpu".
@@ -355,7 +355,7 @@ class RemoteModule(_RemoteModule):
                 | ``def forward_async(input: Tensor) -> Future[Tensor]:``
 
         Args:
-            remote_device (str): Device on the destination worker where we‘d like to place this module.
+            remote_device (str): Device on the destination worker where we'd like to place this module.
                 The format should be "<workername>/<device>", where the device field can be parsed as torch.device type.
                 E.g., "trainer0/cpu", "trainer0", "ps0/cuda:0".
                 In addition, the device field can be optional and the default value is "cpu".
diff --git a/torch/distributed/pipeline/sync/_balance/blockpartition.py b/torch/distributed/pipeline/sync/_balance/blockpartition.py
index 7afe782f6ac8c7..0e74eff33a2212 100644
--- a/torch/distributed/pipeline/sync/_balance/blockpartition.py
+++ b/torch/distributed/pipeline/sync/_balance/blockpartition.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright 2019 Kakao Brain
 #
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
diff --git a/torch/distributed/pipeline/sync/pipeline.py b/torch/distributed/pipeline/sync/pipeline.py
index c5d3836159c954..4c5fd033057a58 100644
--- a/torch/distributed/pipeline/sync/pipeline.py
+++ b/torch/distributed/pipeline/sync/pipeline.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright 2019 Kakao Brain
 #
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
diff --git a/torch/distributed/pipeline/sync/skip/portal.py b/torch/distributed/pipeline/sync/skip/portal.py
index 6b3bbb3fb761d0..763043a111d3d6 100644
--- a/torch/distributed/pipeline/sync/skip/portal.py
+++ b/torch/distributed/pipeline/sync/skip/portal.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright 2019 Kakao Brain
 #
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
diff --git a/torch/distributed/pipeline/sync/skip/skippable.py b/torch/distributed/pipeline/sync/skip/skippable.py
index 6f6bcd7b5614fb..fc2f86df8a8034 100644
--- a/torch/distributed/pipeline/sync/skip/skippable.py
+++ b/torch/distributed/pipeline/sync/skip/skippable.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 # Copyright 2019 Kakao Brain
 #
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
diff --git a/torch/distributed/rpc/utils.py b/torch/distributed/rpc/utils.py
index 40585a73521d5f..afdde21f3c5690 100644
--- a/torch/distributed/rpc/utils.py
+++ b/torch/distributed/rpc/utils.py
@@ -3,7 +3,7 @@ def _parse_remote_device(remote_device: str):
     Parses the remote device.
 
     Args:
-        remote_device (str): Device on the destination worker where we‘d like to place this module.
+        remote_device (str): Device on the destination worker where we'd like to place this module.
            The format should be "<workername>/<device>", where the device field can be parsed as torch.device type.
            E.g., "trainer0/cpu", "trainer0", "ps0/cuda:0".
            In addition, the device field can be optional and the default value is "cpu".
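Since the `<workername>/<device>` format shows up in three docstrings here, a tiny illustrative parser may help; it is a simplified stand-in for the internal `_parse_remote_device` helper, not its actual implementation:

    import torch

    def parse_remote_device(remote_device: str):
        """Split "<workername>/<device>"; the device field defaults to "cpu"."""
        fields = remote_device.split("/")
        if len(fields) == 2:
            worker, device = fields
        elif len(fields) == 1:
            worker, device = fields[0], "cpu"
        else:
            raise ValueError(f"Invalid remote device: {remote_device}")
        return worker, torch.device(device)

    print(parse_remote_device("trainer0/cuda:0"))  # ('trainer0', device(type='cuda', index=0))
    print(parse_remote_device("ps0"))              # ('ps0', device(type='cpu'))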
diff --git a/torch/fx/__init__.py b/torch/fx/__init__.py
index 931fab1b66787e..01b2d39398b827 100644
--- a/torch/fx/__init__.py
+++ b/torch/fx/__init__.py
@@ -48,7 +48,7 @@ def forward(self, x):
     return clamp_1
     """
 
-The **symbolic tracer** performs “symbolic execution” of the Python
+The **symbolic tracer** performs "symbolic execution" of the Python
 code. It feeds fake values, called Proxies, through the code.
 Operations on theses Proxies are recorded. More information about
 symbolic tracing can be found in the :func:`symbolic_trace` and :class:`Tracer`
@@ -63,13 +63,13 @@ def forward(self, x):
 
 **Python code generation** is what makes FX a Python-to-Python (or
 Module-to-Module) transformation toolkit. For each Graph IR, we can
-create valid Python code matching the Graph’s semantics. This
+create valid Python code matching the Graph's semantics. This
 functionality is wrapped up in :class:`GraphModule`, which is a
 :class:`torch.nn.Module` instance that holds a :class:`Graph` as well
 as a ``forward`` method generated from the Graph.
 
-Taken together, this pipeline of components (symbolic tracing →
-intermediate representation → transforms → Python code generation)
+Taken together, this pipeline of components (symbolic tracing ->
+intermediate representation -> transforms -> Python code generation)
 constitutes the Python-to-Python transformation pipeline of FX. In
 addition, these components can be used separately. For example,
 symbolic tracing can be used in isolation to capture a form of
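For readers skimming the FX docstring changes above, this is the tracing -> IR -> code generation pipeline in miniature (standard `torch.fx` usage, independent of this diff):

    import torch
    import torch.fx

    class M(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x) + 1.0

    # Symbolic tracing feeds Proxies through forward() and records operations.
    gm = torch.fx.symbolic_trace(M())

    print(gm.graph)  # the recorded intermediate representation
    print(gm.code)   # valid Python regenerated from the Graph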
diff --git a/torch/fx/graph_module.py b/torch/fx/graph_module.py
index 0e9c2410adb37a..047736caacef8f 100644
--- a/torch/fx/graph_module.py
+++ b/torch/fx/graph_module.py
@@ -488,7 +488,7 @@ def generate_error_message(frame_summary: traceback.FrameSummary) -> str:
         # constiuent substrings of the error message
         tb_repr = traceback.format_exc()
         custom_msg = ("Call using an FX-traced Module, "
-                      f"line {err_lineno} of the traced Module’s "
+                      f"line {err_lineno} of the traced Module's "
                       "generated forward function:")
         before_err = "".join(all_src_lines[err_lineno - 2 : err_lineno])
         marker = "~" * err_line_len + "~~~ <--- HERE"
diff --git a/torch/linalg/__init__.py b/torch/linalg/__init__.py
index ef16e3c4891b15..09bad1069c5bdc 100644
--- a/torch/linalg/__init__.py
+++ b/torch/linalg/__init__.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import sys
 
 import torch
diff --git a/torch/nn/modules/conv.py b/torch/nn/modules/conv.py
index 57c8b40cc78a71..7220c6cba91e3c 100644
--- a/torch/nn/modules/conv.py
+++ b/torch/nn/modules/conv.py
@@ -1,4 +1,4 @@
-# coding=utf-8
+# -*- coding: utf-8 -*-
 import math
 import warnings
 
diff --git a/torch/nn/modules/fold.py b/torch/nn/modules/fold.py
index 6a65071c5d3041..fd132a9690b918 100644
--- a/torch/nn/modules/fold.py
+++ b/torch/nn/modules/fold.py
@@ -1,4 +1,4 @@
-# coding=utf-8
+# -*- coding: utf-8 -*-
 from .module import Module
 from .. import functional as F
 
diff --git a/torch/onnx/__init__.py b/torch/onnx/__init__.py
index 0adb78c93fc4e0..37efecec5fd708 100644
--- a/torch/onnx/__init__.py
+++ b/torch/onnx/__init__.py
@@ -50,7 +50,7 @@ def export(model, args, f, export_params=True, verbose=False, training=TrainingM
 
         1. ONLY A TUPLE OF ARGUMENTS or torch.Tensor::
 
-            ‘’args = (x, y, z)’'
+            "args = (x, y, z)"
 
         The inputs to the model, e.g., such that ``model(*args)`` is a valid
         invocation of the model. Any non-Tensor arguments will be hard-coded into the
         exported model;
@@ -60,11 +60,11 @@ def export(model, args, f, export_params=True, verbose=False, training=TrainingM
 
         2. A TUPLE OF ARGUEMENTS WITH A DICTIONARY OF NAMED PARAMETERS::
 
-            ‘’args = (x,
+            "args = (x,
              {
-             ‘y’: input_y,
-             ‘z’: input_z
-             }) ‘’
+              'y': input_y,
+              'z': input_z
+             })"
 
         The inputs to the model are structured as a tuple consisting of
         non-keyword arguments and the last value of this tuple being a dictionary
@@ -82,20 +82,20 @@ def forward(self, k, x):
                 return x
 
             m = Model()
-            k = torch.randn(2, 3)  
-            x = {torch.tensor(1.): torch.randn(2, 3)}
+            k = torch.randn(2, 3)
+            x = {torch.tensor(1.): torch.randn(2, 3)}
 
             In the previous iteration, the call to export API would look like
 
-            torch.onnx.export(model, (k, x), ‘test.onnx’)
+            torch.onnx.export(model, (k, x), 'test.onnx')
 
             This would work as intended. However, the export function
-            would now assume that the ‘x’ input is intended to represent the optional
+            would now assume that the `x` input is intended to represent the optional
             dictionary consisting of named arguments. In order to prevent this from being
             an issue a constraint is placed to provide an empty dictionary as the last
             input in the tuple args in such cases. The new call would look like this.
 
-            torch.onnx.export(model, (k, x, {}), ‘test.onnx’)
+            torch.onnx.export(model, (k, x, {}), 'test.onnx')
 
         f: a file-like object (has to implement fileno that returns a file descriptor)
             or a string containing a file name. A binary Protobuf will be written
diff --git a/torch/package/_file_structure_representation.py b/torch/package/_file_structure_representation.py
index 4a159cdb863f3c..c952b9628395c8 100644
--- a/torch/package/_file_structure_representation.py
+++ b/torch/package/_file_structure_representation.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 from typing import Dict, List
 
 from ._glob_group import GlobPattern, _GlobGroup
diff --git a/torch/quantization/quantize_jit.py b/torch/quantization/quantize_jit.py
index 9933586e36c839..e6df61303a877c 100644
--- a/torch/quantization/quantize_jit.py
+++ b/torch/quantization/quantize_jit.py
@@ -128,7 +128,7 @@ def quantize_jit(model, qconfig_dict, run_fn, run_args, inplace=False, debug=Fal
         `model`: input float TorchScript model
         `qconfig_dict`: qconfig_dict is a dictionary with names of sub modules as key and
         qconfig for that module as value, empty key means the qconfig will be applied
-        to whole model unless it’s overwritten by more specific configurations, the
+        to whole model unless it's overwritten by more specific configurations, the
         qconfig for each module is either found in the dictionary or fallback to the
         qconfig of parent module.
diff --git a/torch/utils/benchmark/examples/end_to_end.py b/torch/utils/benchmark/examples/end_to_end.py
index 942c20e541734e..524795188a91d8 100644
--- a/torch/utils/benchmark/examples/end_to_end.py
+++ b/torch/utils/benchmark/examples/end_to_end.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """End-to-end example to test a PR for regressions:
 
     $ python -m examples.end_to_end --pr 39850
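Finally, to make the `qconfig_dict` semantics quoted in the quantize_jit hunk concrete, a short calibration sketch modeled on that docstring; the model and data here are illustrative placeholders:

    import torch
    from torch.quantization import get_default_qconfig, quantize_jit

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(1, 1, 1)

        def forward(self, x):
            return self.conv(x)

    ts_model = torch.jit.script(M().eval())

    def calibrate(model, data_loader):
        # Run representative inputs through the model to collect observer stats.
        with torch.no_grad():
            for image in data_loader:
                model(image)

    data_loader = [torch.randn(1, 1, 4, 4) for _ in range(4)]
    # The empty key '' applies the qconfig to the whole model unless a more
    # specific sub-module entry overrides it.
    quantized = quantize_jit(ts_model, {'': get_default_qconfig('fbgemm')}, calibrate, [data_loader])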