Back out "Revert D25757721: [pytorch][PR] Run mypy on more test files" (
Browse files Browse the repository at this point in the history
pytorch#50142)

Summary:
Pull Request resolved: pytorch#50142

Original commit changeset: 58437d719285

Test Plan: OSS CI

Reviewed By: walterddr, ngimel

Differential Revision: D25803866

fbshipit-source-id: d6b83a5211e430c0451994391876103f1ad96315
ezyang authored and facebook-github-bot committed Jan 6, 2021
1 parent 6380869 commit 3ce5398
Showing 6 changed files with 30 additions and 14 deletions.
11 changes: 11 additions & 0 deletions mypy.ini
@@ -17,8 +17,13 @@ check_untyped_defs = True
files =
torch,
caffe2,
test/test_bundled_images.py,
test/test_bundled_inputs.py,
test/test_complex.py,
test/test_dataset.py,
test/test_expecttest.py,
test/test_futures.py,
test/test_numpy_interop.py,
test/test_torch.py,
test/test_type_hints.py,
test/test_type_info.py
@@ -119,6 +124,12 @@ ignore_errors = True
[mypy-torch.overrides]
ignore_errors = True

#
# Adding type annotations to caffe2 is probably not worth the effort
# only work on this if you have a specific reason for it, otherwise
# leave these ignores as they are.
#

[mypy-caffe2.python.*]
ignore_errors = True

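Note on the mypy.ini change: the extended files list pulls the additional test files under the configuration shown above, including check_untyped_defs = True, which makes mypy check the bodies of functions that have no annotations of their own. A minimal sketch (hypothetical code, not part of this commit) of the kind of error that option surfaces in otherwise unannotated test code:

    import math

    def helper():  # unannotated: mypy would normally skip this body entirely
        # With check_untyped_defs = True the body is checked anyway, so the
        # bad argument below is reported as an arg-type error.
        return math.sqrt("4")
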
4 changes: 3 additions & 1 deletion test/test_bundled_inputs.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
import io
from typing import List

import torch
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests
@@ -27,7 +29,7 @@ def forward(self, arg):

sm = torch.jit.script(SingleTensorModel())
original_size = model_size(sm)
get_expr = []
get_expr : List[str] = []
samples = [
# Tensor with small numel and small storage.
(torch.tensor([1]),),
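
The List[str] annotation added above addresses a common mypy requirement: an empty literal gives the checker no way to infer an element type, so the unannotated form is reported as "Need type annotation". A minimal sketch of the pattern (hypothetical usage, not lifted from the test itself):

    from typing import List

    # Unannotated, mypy reports: Need type annotation for "get_expr"
    #     get_expr = []
    # Annotated as in the diff above; later str appends now type-check:
    get_expr: List[str] = []
    get_expr.append("torch.tensor([1])")
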
3 changes: 2 additions & 1 deletion test/test_expecttest.py
@@ -4,6 +4,7 @@
import string
import textwrap
import doctest
from typing import Dict, Any

import hypothesis
from hypothesis.strategies import text, integers, composite, sampled_from, booleans
@@ -38,7 +39,7 @@ def test_replace_string_literal_roundtrip(self, t, raw, quote):
r3 = {r}{quote}placeholder3{quote}
""".format(r='r' if raw else '', quote=quote * 3)
new_prog = expecttest.replace_string_literal(textwrap.dedent(prog), 2, t)[0]
ns = {}
ns : Dict[str, Any] = {}
exec(new_prog, ns)
msg = "program was:\n{}".format(new_prog)
self.assertEqual(ns['r'], 'placeholder', msg=msg) # noqa: F821
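
The Dict[str, Any] annotation above reflects what exec does with the namespace: it can bind names of arbitrary types, so Any-valued entries are the honest choice for the empty dict. A minimal sketch (hypothetical, mirroring the pattern in the test):

    from typing import Any, Dict

    ns: Dict[str, Any] = {}          # exec() may bind names of any type
    exec("r = 'placeholder'", ns)
    result = ns["r"]                 # typed as Any, so the test's assertEqual is fine
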
18 changes: 9 additions & 9 deletions test/test_numpy_interop.py
@@ -47,10 +47,8 @@ def get_castable_tensor(shape, dtype):
else:
# can't directly use min and max, because for int64_t, max - min
# is greater than int64_t range and triggers UB.
dtype_info = torch.iinfo(dtype)
low = max(dtype_info.min, int(-1e10))
high = min(dtype_info.max, int(1e10))
dtype_info = torch.iinfo(dtype)
low = max(torch.iinfo(dtype).min, int(-1e10))
high = min(torch.iinfo(dtype).max, int(1e10))
t = torch.empty(shape, dtype=torch.int64).random_(low, high)
return t.to(dtype)

@@ -272,10 +270,12 @@ def test_numpy_array_interface(self, device):
]
for tp, dtype in zip(types, dtypes):
if np.dtype(dtype).kind == 'u':
x = torch.Tensor([1, 2, 3, 4]).type(tp)
# .type expects a XxxTensor, which have no type hints on
# purpose, so ignore during mypy type checking
x = torch.Tensor([1, 2, 3, 4]).type(tp) # type: ignore
array = np.array([1, 2, 3, 4], dtype=dtype)
else:
x = torch.Tensor([1, -2, 3, -4]).type(tp)
x = torch.Tensor([1, -2, 3, -4]).type(tp) # type: ignore
array = np.array([1, -2, 3, -4], dtype=dtype)

# Test __array__ w/o dtype argument
@@ -309,7 +309,7 @@ def test_numpy_array_interface(self, device):
float_types = [torch.DoubleTensor, torch.FloatTensor]
float_dtypes = [np.float64, np.float32]
for tp, dtype in zip(float_types, float_dtypes):
x = torch.Tensor([1, 2, 3, 4]).type(tp)
x = torch.Tensor([1, 2, 3, 4]).type(tp) # type: ignore
array = np.array([1, 2, 3, 4], dtype=dtype)
for func in ['sin', 'sqrt', 'ceil']:
ufunc = getattr(np, func)
@@ -321,7 +321,7 @@ def test_numpy_array_interface(self, device):

# Test functions with boolean return value
for tp, dtype in zip(types, dtypes):
x = torch.Tensor([1, 2, 3, 4]).type(tp)
x = torch.Tensor([1, 2, 3, 4]).type(tp) # type: ignore
array = np.array([1, 2, 3, 4], dtype=dtype)
geq2_x = np.greater_equal(x, 2)
geq2_array = np.greater_equal(array, 2).astype('uint8')
@@ -360,7 +360,7 @@ def test_parse_numpy_int(self, device):
self.assertEqual(torch.ones([2, 2, 2, 2]).mean(scalar), torch.ones([2, 2, 2, 2]).mean(np_val))

# numpy integral type parses like a python int in custom python bindings:
self.assertEqual(torch.Storage(np_val).size(), scalar)
self.assertEqual(torch.Storage(np_val).size(), scalar) # type: ignore

tensor = torch.tensor([2], dtype=torch.int)
tensor[0] = np_val
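
The # type: ignore comments above are narrow, per-line suppressions: per the in-diff comment, .type() expects the legacy XxxTensor classes, which carry no type hints on purpose, so only those calls are exempted while the rest of the file stays checked. A minimal sketch of the pattern (hypothetical values, not the test's own data):

    import torch

    # Only the flagged line is exempted; surrounding code is still checked by mypy.
    x = torch.Tensor([1, 2, 3, 4]).type(torch.FloatTensor)  # type: ignore
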
4 changes: 3 additions & 1 deletion torch/testing/_internal/expecttest.py
@@ -3,6 +3,7 @@
import traceback
import os
import string
from typing import Tuple


# This file implements expect tests (also known as "golden" tests).
@@ -139,7 +140,8 @@ def ok_for_raw_triple_quoted_string(s, quote):
r"(?P<raw>r?)", re.DOTALL)


def replace_string_literal(src, lineno, new_string):
def replace_string_literal(src : str, lineno : int,
new_string : str) -> Tuple[str, int]:
r"""
Replace a triple quoted string literal with new contents.
Only handles printable ASCII correctly at the moment. This
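
The annotated signature above gives replace_string_literal explicit parameter types and a Tuple[str, int] return, which matches how test_expecttest.py indexes the result with [0] for the rewritten source. A minimal, hypothetical stand-in (not the real implementation) showing how callers consume such a signature:

    from typing import Tuple

    def replace_string_literal_demo(src: str, lineno: int, new_string: str) -> Tuple[str, int]:
        # Stand-in body only: return the (possibly rewritten) source plus an int,
        # e.g. a count of substitutions; the real function's logic is omitted here.
        return src, 0

    new_src, count = replace_string_literal_demo("r = '''old'''", 1, "new")
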
4 changes: 2 additions & 2 deletions torch/utils/bundled_inputs.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
from typing import Any, TypeVar, Optional, Tuple, List, NamedTuple, Union
from typing import Any, TypeVar, Optional, Tuple, List, NamedTuple, Union, Sequence
import textwrap
import torch
from torch._C import TupleType, OptionalType, ListType
@@ -17,7 +17,7 @@ class InflatableArg(NamedTuple):

def augment_model_with_bundled_inputs(
model: torch.jit.ScriptModule,
inputs: Optional[List[Tuple[Any, ...]]] = None,
inputs: Optional[Sequence[Tuple[Any, ...]]] = None,
_receive_inflate_expr: Optional[List[str]] = None, # For debugging.
) -> None:
"""Add bundled sample inputs to a model.
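
Widening inputs from List[...] to Sequence[...] above is a typical typing fix: Sequence lets callers pass a tuple (or any other read-only sequence) of sample-input tuples, which the previous List annotation would have rejected. A minimal sketch with a hypothetical, reduced signature:

    from typing import Any, Optional, Sequence, Tuple

    def augment_demo(inputs: Optional[Sequence[Tuple[Any, ...]]] = None) -> None:
        # Reduced stand-in for the real signature: the body only iterates the samples.
        for _sample in inputs or ():
            pass

    augment_demo([(1, 2)])     # a list of sample tuples is accepted
    augment_demo(((1, 2),))    # so is a tuple of sample tuples, thanks to Sequence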
