Diff against master and enable bugprone-* checks (pytorch#12378)
Summary:
This PR:

1. Makes clang-tidy diff against `master` instead of `HEAD~1` in CI, which makes much more sense.
2. Enables all checks in the `bugprone-*` category (see https://clang.llvm.org/extra/clang-tidy/checks/list.html), except the one about parentheses in macros (`bugprone-macro-parentheses`), because it doesn't always apply well to our code.

Fixed some code smells along the way; a short sketch of the kind of issue these checks catch follows below.
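
Most of the mechanical renames in the diff below (for example /*outputs=*/ becoming /*num_outputs=*/) are fixes for bugprone-argument-comment, which requires an inline /*name=*/ comment to match the parameter name in the callee's declaration. A minimal sketch of the rule, using hypothetical function names rather than anything from this PR:

#include <cstddef>

// Hypothetical callee; the second parameter is named num_outputs.
void create_node(const char* name, std::size_t num_outputs) {
  (void)name;
  (void)num_outputs;
}

int main() {
  // Flagged by bugprone-argument-comment: the comment says "outputs",
  // but the declared parameter is "num_outputs".
  create_node("aten::add", /*outputs=*/0);

  // Clean: the comment matches the declaration.
  create_node("aten::add", /*num_outputs=*/0);
  return 0;
}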

ezyang
Pull Request resolved: pytorch#12378

Differential Revision: D10247972

Pulled By: goldsborough

fbshipit-source-id: 97dc9e262effa6874d2854584bf41a86684eb8bd
goldsborough authored and facebook-github-bot committed Oct 10, 2018
1 parent 727609f commit 033e957
Showing 22 changed files with 113 additions and 69 deletions.
2 changes: 2 additions & 0 deletions .clang-tidy
@@ -3,6 +3,8 @@
Checks: '
-*
,modernize-deprecated-headers
,bugprone-*
,-bugprone-macro-parentheses
'
WarningsAsErrors: '*'
HeaderFilterRegex: 'torch/csrc/.*'
2 changes: 1 addition & 1 deletion tools/autograd/gen_variable_type.py
@@ -131,7 +131,7 @@
std::shared_ptr<jit::tracer::TracingState> tracer_state;
if (jit::tracer::isTracing()) {
tracer_state = jit::tracer::getTracingState();
node = tracer_state->graph->create(jit::aten::${trace_name}, /*outputs=*/0);
node = tracer_state->graph->create(jit::aten::${trace_name}, /*num_outputs=*/0);
jit::tracer::recordSourceLocation(node);
${add_trace_inputs}
tracer_state->graph->appendNode(node);
54 changes: 41 additions & 13 deletions tools/clang_tidy.py
@@ -13,6 +13,7 @@
"""

import argparse
import collections
import fnmatch
import json
import os.path
@@ -22,6 +23,9 @@
import sys


Patterns = collections.namedtuple("Patterns", "positive, negative")


# NOTE: Clang-tidy cannot lint headers directly, because headers are not
# compiled -- translation units are, of which there is one per implementation
# (c/cc/cpp) file.
@@ -46,25 +50,47 @@ def run_shell_command(arguments):
return output


def split_negative_from_positive_patterns(patterns):
"""Separates negative patterns (that start with a dash) from positive patterns"""
positive, negative = [], []
for pattern in patterns:
if pattern.startswith("-"):
negative.append(pattern[1:])
else:
positive.append(pattern)

return Patterns(positive, negative)


def get_file_patterns(globs, regexes):
"""Returns a list of compiled regex objects from globs and regex pattern strings."""
# fnmatch.translate converts a glob into a regular expression.
# https://docs.python.org/2/library/fnmatch.html#fnmatch.translate
regexes += [fnmatch.translate(glob) for glob in globs]
return [re.compile(regex) for regex in regexes] or [DEFAULT_FILE_PATTERN]
glob = split_negative_from_positive_patterns(globs)
regexes = split_negative_from_positive_patterns(regexes)

positive_regexes = regexes.positive + [fnmatch.translate(g) for g in glob.positive]
negative_regexes = regexes.negative + [fnmatch.translate(g) for g in glob.negative]

positive_patterns = [re.compile(regex) for regex in positive_regexes] or [
DEFAULT_FILE_PATTERN
]
negative_patterns = [re.compile(regex) for regex in negative_regexes]

return Patterns(positive_patterns, negative_patterns)


def filter_files(files, file_patterns):
"""Returns all files that match any of the patterns."""
if VERBOSE:
print("Filtering with these file patterns: {}".format(file_patterns))
for file in files:
has_match = False
for pattern in file_patterns:
if pattern.match(file):
if not any(n.match(file) for n in file_patterns.negative):
if any(p.match(file) for p in file_patterns.positive):
yield file
has_match = True
if not has_match and VERBOSE:
message = "{} does not match any file pattern in {{{}}}"
print(message.format(file, ", ".join(map(str, file_patterns))))
continue
if VERBOSE:
print("{} omitted due to file filters".format(file))


def get_changed_files(revision, paths):
@@ -135,17 +161,19 @@ def parse_options():
parser.add_argument(
"-g",
"--glob",
nargs="+",
action="append",
default=[],
help="Only lint files that match these glob patterns "
"(see documentation for `fnmatch` for supported syntax)",
"(see documentation for `fnmatch` for supported syntax)."
"If a pattern starts with a - the search is negated for that pattern.",
)
parser.add_argument(
"-x",
"--regex",
nargs="+",
action="append",
default=[],
help="Only lint files that match these regular expressions (from the start of the filename)",
help="Only lint files that match these regular expressions (from the start of the filename). "
"If a pattern starts with a - the search is negated for that pattern.",
)
parser.add_argument(
"-c",
12 changes: 11 additions & 1 deletion tools/run-clang-tidy-in-ci.sh
@@ -28,4 +28,14 @@ if [[ ! -d build ]]; then
fi

# Run Clang-Tidy
time python tools/clang_tidy.py -vp torch/csrc -d HEAD~1 "$@"
# The negative filters below are to exclude files that include onnx_pb.h,
# otherwise we'd have to build ONNX protos as part of this CI job.
time python tools/clang_tidy.py \
--verbose \
--paths torch/csrc \
--diff master \
-g"-torch/csrc/jit/init.cpp" \
-g"-torch/csrc/jit/export.cpp" \
-g"-torch/csrc/jit/import.cpp" \
-g"-torch/csrc/jit/test_jit.cpp" \
"$@"
2 changes: 1 addition & 1 deletion torch/csrc/api/include/torch/detail/ordered_dict.h
@@ -71,7 +71,7 @@ class OrderedDict {
}

// Move works by default, because you can move-construct vectors of const
// values..
// values.
// NB: I tried to make this noexcept (conditional on the move constructors of
// index_ and items_ being noexcept) but the obvious spelling didn't compile
// on Windows.
4 changes: 2 additions & 2 deletions torch/csrc/api/src/nn/modules/rnn.cpp
@@ -103,14 +103,14 @@ void RNNImplBase<Derived>::flatten_parameters() {
// Cache the flattened weight and bias vector.
flat_weights_ = flat_weights();

if (!cudnn_mode_ || !torch::cudnn_is_acceptable(/*sample=*/w_ih.at(0))) {
if (!cudnn_mode_ || !torch::cudnn_is_acceptable(w_ih.at(0))) {
return;
}

NoGradGuard no_grad;
torch::_cudnn_rnn_flatten_weight(
flat_weights_,
/*weight_stride=*/options.with_bias_ ? 4 : 2,
/*weight_stride0=*/options.with_bias_ ? 4 : 2,
options.input_size_,
static_cast<int64_t>(*cudnn_mode_),
options.hidden_size_,
2 changes: 1 addition & 1 deletion torch/csrc/autograd/functions/tensor.cpp
@@ -77,7 +77,7 @@ auto CopySlices::apply(variable_list&& inputs) -> variable_list {
AT_ASSERT(res[i].defined());
if (i == 0) {
grad_slice.copy_(res[i]);
grad_inputs[i] = std::move(result);
grad_inputs[i] = std::move(result); // NOLINT(bugprone-use-after-move)
} else {
grad_inputs[i] = std::move(res[i]);
}
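
The NOLINT added above silences bugprone-use-after-move, which warns whenever a variable is read after being passed to std::move; in this branch-per-index loop the move presumably happens only once, so the warning is treated as a false positive. A reduced illustration of the pattern the check targets (hypothetical names, not the PyTorch code):

#include <string>
#include <utility>
#include <vector>

int main() {
  std::string result = "gradient";
  std::vector<std::string> grad_inputs;

  grad_inputs.push_back(std::move(result));

  // Flagged by bugprone-use-after-move: result is in a valid but
  // unspecified state after the move, so reading it is suspicious.
  bool was_emptied = result.empty();
  (void)was_emptied;
  return 0;
}
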
2 changes: 1 addition & 1 deletion torch/csrc/cuda/comm.cpp
@@ -129,7 +129,7 @@ std::vector<at::Tensor> scatter(
std::vector<at::Tensor> chunks;
if (chunk_sizes) {
const int64_t chunk_size_sum =
std::accumulate(chunk_sizes->begin(), chunk_sizes->end(), 0);
std::accumulate(chunk_sizes->begin(), chunk_sizes->end(), int64_t{0});
AT_CHECK(
chunk_size_sum == tensor.size(dim),
"given chunk sizes don't sum up to the tensor's size ",
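
The switch from 0 to int64_t{0} above is most likely a fix for bugprone-fold-init-type: std::accumulate deduces its accumulator type from the initial value, so an int literal silently truncates a 64-bit sum. A small sketch with made-up chunk sizes:

#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  std::vector<int64_t> chunk_sizes = {3'000'000'000LL, 3'000'000'000LL};

  // Flagged by bugprone-fold-init-type: the accumulator is int because the
  // initial value 0 is int, so the 64-bit sum overflows (undefined behavior).
  int64_t bad = std::accumulate(chunk_sizes.begin(), chunk_sizes.end(), 0);

  // Correct: the accumulator is int64_t because the initial value is int64_t.
  int64_t good =
      std::accumulate(chunk_sizes.begin(), chunk_sizes.end(), int64_t{0});

  std::cout << bad << " vs " << good << "\n";
  return 0;
}
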
4 changes: 2 additions & 2 deletions torch/csrc/jit/autodiff.cpp
@@ -309,7 +309,7 @@ static std::vector<Value*> gradientForNode(Node* node, ArrayRef<Value*> grad_val
returned_grad = returned_grad.unsqueeze(*it);
return {returned_grad};

} else if (node->matches("aten::squeeze(Tensor self, int dim) -> Tensor", /*const=*/attr::dim)) {
} else if (node->matches("aten::squeeze(Tensor self, int dim) -> Tensor", /*const_inputs=*/attr::dim)) {
int64_t dim = *node->get<int64_t>(attr::dim);
const auto& sizes = inputs.at(0).sizes();
wrapDim(dim, sizes);
@@ -318,7 +318,7 @@ static std::vector<Value*> gradientForNode(Node* node, ArrayRef<Value*> grad_val
}
return {sizes.at(dim) > 1 ? grads.at(0) : grads.at(0).unsqueeze(dim), nullptr};

} else if (node->matches("aten::cat(Tensor[] tensors, int dim) -> Tensor", /*const=*/attr::dim)) {
} else if (node->matches("aten::cat(Tensor[] tensors, int dim) -> Tensor", /*const_inputs=*/attr::dim)) {
int dim = *node->get<int64_t>(attr::dim);
auto tensor_inputs = inputs; tensor_inputs.pop_back();
const auto& first_sizes = tensor_inputs.at(0).sizes();
4 changes: 2 additions & 2 deletions torch/csrc/jit/custom_operator.h
@@ -62,7 +62,7 @@ Node* getTracedNode(
const std::tuple<Types...>& tuple) {
auto symbol = Symbol::fromQualString(schema.name);
const auto& graph = tracer::getTracingState()->graph;
Node* node = graph->create(std::move(symbol), /*outputs=*/0);
Node* node = graph->create(std::move(symbol), /*num_outputs=*/0);
tracer::recordSourceLocation(node);

// Hack to call addInputs for the parameter pack in a sequenced fashion.
@@ -207,7 +207,7 @@ Operator createOperator(
ArgumentTuple tuple;
torch::jit::detail::callOperatorWithTuple(
schema,
std::move(implementation),
std::move(implementation), // NOLINT(bugprone-move-forwarding-reference)
stack,
tuple,
typename MakeIndices<std::tuple_size<ArgumentTuple>::value>::indices{});
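
The NOLINT above targets bugprone-move-forwarding-reference, which fires when std::move is applied to a forwarding reference; std::forward is normally what is meant, since std::move can silently steal from an lvalue argument. A sketch of the difference, with invented names:

#include <string>
#include <utility>

void consume(std::string s) { (void)s; }

template <typename F>
void call_with_move(F&& f) {
  // Flagged by bugprone-move-forwarding-reference: f is a forwarding
  // reference, so std::move would also move from an lvalue argument.
  consume(std::move(f));  // NOLINT(bugprone-move-forwarding-reference)
}

template <typename F>
void call_with_forward(F&& f) {
  // Preserves the caller's value category: lvalues are copied, rvalues moved.
  consume(std::forward<F>(f));
}

int main() {
  std::string implementation = "impl";
  call_with_forward(implementation);         // implementation is copied
  call_with_move(std::string("temporary"));  // moving a temporary is fine
  return 0;
}
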
2 changes: 1 addition & 1 deletion torch/csrc/jit/export.cpp
@@ -692,7 +692,7 @@ void ModuleEncoder::EncodeTensor(
tensor.storage(),
/* storageOffset = */ 0,
/* size = */ { static_cast<int64_t>(tensor.storage().size()) },
/* strides = */ { 1 })
/* stride = */ { 1 })
.cpu();
}

4 changes: 2 additions & 2 deletions torch/csrc/jit/fusers/cuda/fused_kernel.h
@@ -35,7 +35,7 @@ struct CUDAFusedKernel : public ::torch::jit::FusedKernel {

virtual uint64_t get_rand_offset(uint32_t numel) override {
int numBlocks = std::min(maxBlocks, ceilDiv(numel, blockSize));
return 4 * (ceil(numel/(4 * blockSize * numBlocks)) + 1);
return 4 * (ceil(numel / (4.0 * blockSize * numBlocks)) + 1);
}

virtual void launch_raw(uint32_t numel, void ** arguments) override;
@@ -53,7 +53,7 @@ struct CUDAFusedKernel : public ::torch::jit::FusedKernel {
};

} // namespace cudafuser
} // namespace jit
} // namespace jit
} // namespace torch

#endif // USE_CUDA_FUSER
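
The change from 4 to 4.0 above looks like a fix for bugprone-integer-division: numel / (4 * blockSize * numBlocks) divides two integers, so the fractional part is discarded before ceil ever runs. A reduced example with made-up launch parameters:

#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  uint32_t numel = 1000;
  int block_size = 512;
  int num_blocks = 4;

  // Flagged by bugprone-integer-division: the division is performed in
  // integer arithmetic (yielding 0) and only then converted to double.
  double truncated = std::ceil(numel / (4 * block_size * num_blocks));

  // With a floating-point operand the fraction survives and ceil rounds up.
  double rounded_up = std::ceil(numel / (4.0 * block_size * num_blocks));

  std::cout << truncated << " vs " << rounded_up << "\n";  // 0 vs 1
  return 0;
}
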
9 changes: 6 additions & 3 deletions torch/csrc/jit/import.cpp
@@ -372,15 +372,17 @@ ModuleDecoder::ModuleDecoder(
void import_ir_module(
ModuleLookup module_lookup,
std::istream& in) {
ModuleDecoder(module_lookup, in);
ModuleDecoder decoder(module_lookup, in);
(void)decoder;
}

void import_ir_module(
ModuleLookup module_lookup,
const std::string& filename) {
std::ifstream in(filename, std::ios_base::binary);

ModuleDecoder(module_lookup, in);
ModuleDecoder decoder(module_lookup, in);
(void)decoder;
}

std::shared_ptr<script::Module> load(std::istream& in) {
@@ -397,7 +399,8 @@ std::shared_ptr<script::Module> load(std::istream& in) {
return curr;
};

ModuleDecoder(module_lookup, in);
ModuleDecoder decoder(module_lookup, in);
(void)decoder;

return module;
}
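
Giving the ModuleDecoder temporaries a name (plus a (void) cast to keep -Wunused-variable quiet) placates bugprone-unused-raii, which flags objects constructed and destroyed in the same statement, because for RAII types that is usually an accident. The classic bug the check exists for, with illustrative names only:

#include <mutex>

std::mutex m;
int counter = 0;

void buggy_increment() {
  // Flagged by bugprone-unused-raii: the unnamed temporary is destroyed at
  // the end of this statement, so the mutex is NOT held for the increment.
  std::lock_guard<std::mutex>{m};
  ++counter;
}

void fixed_increment() {
  // A named guard lives until the end of the scope, as intended.
  std::lock_guard<std::mutex> guard(m);
  ++counter;
}

int main() {
  fixed_increment();
  buggy_increment();
  return 0;
}
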
4 changes: 2 additions & 2 deletions torch/csrc/jit/passes/annotate_effects.cpp
@@ -259,7 +259,7 @@ class AnnotateEffectsImpl {
Value* addFenceForNode(Node* node, Value* curToken) {
// Add a start fence
auto startFence =
node->owningGraph()->create(prim::MemoryFence, /*outputs=*/0);
node->owningGraph()->create(prim::MemoryFence, /*num_outputs=*/0);

// Add world tokens as the first input and output
startFence->addInput(curToken);
@@ -282,7 +282,7 @@

// Add an end fence
auto endFence =
node->owningGraph()->create(prim::MemoryFence, /*outputs=*/0);
node->owningGraph()->create(prim::MemoryFence, /*num_outputs=*/0);

// Add world tokens as the first input and output
endFence->addInput(curToken);
4 changes: 2 additions & 2 deletions torch/csrc/jit/passes/canonicalize_ops.cpp
@@ -45,7 +45,7 @@ static void CanonicalizeOps(Block* block) {
// shape analysis and differentiation passes for those two individual ops.
// Later, we will fuse together those two ops into a single addmm.
if (it->matches("aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta, Scalar alpha) -> Tensor",
/*with_const=*/{attr::beta, attr::alpha})) {
/*const_inputs=*/{attr::beta, attr::alpha})) {
if (it->get<at::Scalar>(attr::alpha)->toDouble() != 1.0 ||
it->get<at::Scalar>(attr::beta)->toDouble() != 1.0) {
continue;
@@ -63,7 +63,7 @@
it->output()->replaceAllUsesWith(result);
it.destroyCurrent();
} else if (it->matches("aten::chunk(Tensor self, int chunks, int dim) -> Tensor[]",
/*with_const=*/{attr::chunks, attr::dim})) {
/*const_inputs=*/{attr::chunks, attr::dim})) {
if (auto orig_outputs = getChunkOutputs(*it)) {
WithInsertPoint guard(*it);
SymbolicVariable self {it->namedInput(attr::self)};
26 changes: 13 additions & 13 deletions torch/csrc/jit/passes/graph_fuser.cpp
@@ -205,32 +205,32 @@ struct GraphFuser {
if (!isSimpleMap(node)) return false;

if (node->matches("aten::add(Tensor self, Tensor other, *, Scalar alpha) -> Tensor",
/*const=*/attr::alpha) ||
/*const_inputs=*/attr::alpha) ||
node->matches("aten::add(Tensor self, Scalar other, Scalar alpha) -> Tensor",
/*const=*/{attr::other, attr::alpha}) ||
/*const_inputs=*/{attr::other, attr::alpha}) ||
node->matches("aten::sub(Tensor self, Tensor other, *, Scalar alpha) -> Tensor",
/*const=*/attr::alpha) ||
/*const_inputs=*/attr::alpha) ||
node->matches("aten::sub(Tensor self, Scalar other, Scalar alpha) -> Tensor",
/*const=*/{attr::other, attr::alpha}) ||
node->matches("aten::mul(Tensor self, Scalar other) -> Tensor", /*const=*/attr::other) ||
node->matches("aten::div(Tensor self, Scalar other) -> Tensor", /*const=*/attr::other) ||
node->matches("aten::clamp(Tensor self, Scalar min, Scalar max) -> Tensor", /*const=*/{attr::min, attr::max})) {
/*const_inputs=*/{attr::other, attr::alpha}) ||
node->matches("aten::mul(Tensor self, Scalar other) -> Tensor", /*const_inputs=*/attr::other) ||
node->matches("aten::div(Tensor self, Scalar other) -> Tensor", /*const_inputs=*/attr::other) ||
node->matches("aten::clamp(Tensor self, Scalar min, Scalar max) -> Tensor", /*const_inputs=*/{attr::min, attr::max})) {
auto inputs = tensorInputs(node);
return haveSupportedType(inputs);
}
else if (
node->matches("aten::lt(Tensor self, Tensor other) -> Tensor") ||
node->matches("aten::lt(Tensor self, Scalar other) -> Tensor", /*const=*/attr::other) ||
node->matches("aten::lt(Tensor self, Scalar other) -> Tensor", /*const_inputs=*/attr::other) ||
node->matches("aten::le(Tensor self, Tensor other) -> Tensor") ||
node->matches("aten::le(Tensor self, Scalar other) -> Tensor", /*const=*/attr::other) ||
node->matches("aten::le(Tensor self, Scalar other) -> Tensor", /*const_inputs=*/attr::other) ||
node->matches("aten::gt(Tensor self, Tensor other) -> Tensor") ||
node->matches("aten::gt(Tensor self, Scalar other) -> Tensor", /*const=*/attr::other) ||
node->matches("aten::gt(Tensor self, Scalar other) -> Tensor", /*const_inputs=*/attr::other) ||
node->matches("aten::ge(Tensor self, Tensor other) -> Tensor") ||
node->matches("aten::ge(Tensor self, Scalar other) -> Tensor", /*const=*/attr::other) ||
node->matches("aten::ge(Tensor self, Scalar other) -> Tensor", /*const_inputs=*/attr::other) ||
node->matches("aten::eq(Tensor self, Tensor other) -> Tensor") ||
node->matches("aten::eq(Tensor self, Scalar other) -> Tensor", /*const=*/attr::other) ||
node->matches("aten::eq(Tensor self, Scalar other) -> Tensor", /*const_inputs=*/attr::other) ||
node->matches("aten::ne(Tensor self, Tensor other) -> Tensor") ||
node->matches("aten::ne(Tensor self, Scalar other) -> Tensor", /*const=*/attr::other)) {
node->matches("aten::ne(Tensor self, Scalar other) -> Tensor", /*const_inputs=*/attr::other)) {
// comparison operators produce Byte type, and it's ok, check only inputs
auto inputs = tensorInputs(node);
return haveSupportedType(inputs);
12 changes: 6 additions & 6 deletions torch/csrc/jit/passes/peephole.cpp
@@ -25,7 +25,7 @@ void PeepholeOptimize(Block * block) {
// XXX: remember that if you want to simplify an expression by combining multiple nodes
// into a different one, then you need to check that they all belong to the given block
if (node->matches("aten::expand(Tensor self, int[] size, *, bool implicit) -> Tensor",
/*with_const=*/attr::size)) {
/*const_inputs=*/attr::size)) {
// x.expand(x.size()) == x
if (auto input_type = node->namedInput(attr::self)->type()->cast<CompleteTensorType>()) {
auto expanded_sizes = node->get<std::vector<int64_t>>(attr::size);
@@ -49,7 +49,7 @@
node->output()->replaceAllUsesWith(node->input(0));
}
} else if (node->matches("aten::add(Tensor self, Tensor other, *, Scalar alpha) -> Tensor",
/*with_const=*/attr::alpha)) {
/*const_inputs=*/attr::alpha)) {
// z + x.mm(y) == z.addmm(x, y) == x.mm(y) + z
// This optimization has been disabled at the moment, because it's not helpful at all
// until we will be able to represent torch.addmm(a, b, c, out=a). That's because addmm
@@ -101,14 +101,14 @@
}
}
// TODO: this doesn't work with Scalar-Tensor ops! We should canonicalize those
} else if (node->matches("aten::mul(Tensor self, Scalar other) -> Tensor", /*with_const=*/attr::other) ||
node->matches("aten::div(Tensor self, Scalar other) -> Tensor", /*with_const=*/attr::other)) {
} else if (node->matches("aten::mul(Tensor self, Scalar other) -> Tensor", /*const_inputs=*/attr::other) ||
node->matches("aten::div(Tensor self, Scalar other) -> Tensor", /*const_inputs=*/attr::other)) {
// x * 1 == x / 1 == x
if (node->get<at::Scalar>(attr::other)->toDouble() == 1) {
node->output()->replaceAllUsesWith(node->input(0));
}
} else if (node->matches("aten::add(Tensor self, Scalar other, Scalar alpha) -> Tensor", /*with_const=*/{attr::alpha, attr::other}) ||
node->matches("aten::sub(Tensor self, Scalar other, Scalar alpha) -> Tensor", /*with_const=*/{attr::alpha, attr::other})) {
} else if (node->matches("aten::add(Tensor self, Scalar other, Scalar alpha) -> Tensor", /*const_inputs=*/{attr::alpha, attr::other}) ||
node->matches("aten::sub(Tensor self, Scalar other, Scalar alpha) -> Tensor", /*const_inputs=*/{attr::alpha, attr::other})) {
// x + 0 == x - 0 == x
if (node->get<at::Scalar>(attr::alpha)->toDouble() == 1 &&
node->get<at::Scalar>(attr::other)->toDouble() == 0) {
(Diffs for the remaining 5 changed files are not shown.)