Match parameter names and = default (pytorch#9737)
Summary:
More clang-tidy cleanups in `torch/csrc`. This time:

1. `hicpp-use-equals-default` recommends `= default` instead of an empty body `{}` for constructors/destructors. This is better practice because it expresses the intent more clearly (https://stackoverflow.com/questions/6502828/what-does-default-mean-after-a-class-function-declaration).
2. `readability-inconsistent-declaration-parameter-name` enforces that parameter names in a declaration match the parameter names in the corresponding definition. This is generally useful and can prevent confusion and bugs; see the sketch below.
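
A minimal sketch of what the two checks push toward (hypothetical code, not taken from this PR; the `Logger` class and its `log` method are made up for illustration):

```cpp
#include <iostream>
#include <string>

struct Logger {
  // hicpp-use-equals-default: prefer `= default` over an empty body `{}`;
  // it states the intent explicitly and lets the compiler generate the member.
  virtual ~Logger() = default;  // instead of: virtual ~Logger() {}

  // The declaration names its parameter `message`.
  void log(const std::string& message);
};

// readability-inconsistent-declaration-parameter-name: the definition reuses
// the declared name `message` instead of, say, `msg`.
void Logger::log(const std::string& message) {
  std::cout << message << '\n';
}

int main() {
  Logger().log("declaration and definition agree on the name");
  return 0;
}
```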

Also updated my clang-tidy script (`tools/clang_tidy.py`) a bit: directory paths are normalized, file patterns are matched with `search` instead of `match`, and there is a new `-n`/`--no-recursive` flag plus more verbose diagnostics.

apaszke ezyang
Pull Request resolved: pytorch#9737

Differential Revision: D9069069

Pulled By: goldsborough

fbshipit-source-id: f7b3f3a4eb4c9fadc30425a153566d3b613a41ae
goldsborough authored and facebook-github-bot committed Jul 30, 2018
1 parent 40a8239 commit 04939a4
Showing 42 changed files with 137 additions and 84 deletions.
5 changes: 4 additions & 1 deletion .clang-tidy
@@ -2,13 +2,15 @@
# NOTE: there must be no spaces before the '-', so put the comma first.
Checks: '
*
,clang-analyzer-*
,modernize-*
,-cert-err58-cpp
,-cert-err60-cpp
,-clang-diagnostic-*
,-cppcoreguidelines-owning-memory
,-cppcoreguidelines-pro-bounds-array-to-pointer-decay
,-cppcoreguidelines-pro-bounds-constant-array-index
,-cppcoreguidelines-pro-type-member-init
,-cppcoreguidelines-pro-type-static-cast-downcast
,-cppcoreguidelines-pro-type-vararg
,-cppcoreguidelines-special-member-functions
@@ -23,9 +25,11 @@ Checks: '
,-hicpp-braces-around-statements
,-hicpp-explicit-conversions
,-hicpp-no-array-decay
,-hicpp-signed-bitwise
,-hicpp-special-member-functions
,-hicpp-vararg
,-llvm-header-guard
,-llvm-include-order
,-llvm-namespace-comment
,-misc-unused-parameters
,-modernize-make-unique
@@ -34,7 +38,6 @@ Checks: '
,-readability-braces-around-statements
,-readability-else-after-return
,-readability-named-parameter
,clang-analyzer-*
'
WarningsAsErrors: ''
HeaderFilterRegex: 'torch/csrc/'
52 changes: 46 additions & 6 deletions tools/clang_tidy.py
@@ -7,6 +7,7 @@
import subprocess
import sys


DEFAULT_FILE_PATTERN = r".*\.[ch](pp)?"

# @@ -start,count +start,count @@
@@ -26,6 +27,11 @@ def run_shell_command(arguments, process_name=None):
return output.decode()


def normalize_directory_path(path):
"""Normalizes a directory path."""
return path.rstrip('/')


def transform_globs_into_regexes(globs):
"""Turns glob patterns into regular expressions."""
return [glob.replace("*", ".*").replace("?", ".") for glob in globs]
@@ -49,16 +55,37 @@ def git_diff(args, verbose):
return run_shell_command(command, process_name="git diff")


def filter_files(files, file_patterns):
def filter_files(files, file_patterns, verbose):
"""Returns all files that match any of the patterns."""
filtered = []
for file in files:
has_match = False
for pattern in file_patterns:
if pattern.match(file):
if pattern.search(file):
filtered.append(file)
has_match = True
if not has_match and verbose:
message = "{} does not match any ".format(file)
message += "file pattern in {{{}}}".format(', '.join(map(str, file_patterns)))
print(message)
return filtered


def remove_recursive_files(files, paths, verbose):
"""
Removes all files that are not immediately under one of the given paths.
"""
for file in files:
if os.path.dirname(file) in paths:
yield file
else:
if verbose:

message = "{} ({}) does not match any ".format(file, os.path.dirname(file))
message += "non-recursive path in {{{}}}".format(", ".join(paths))
print(message)


def get_changed_files(revision, paths, verbose):
"""Runs git diff to get the paths of all changed files."""
# --diff-filter AMU gets us files that are (A)dded, (M)odified or (U)nmerged (in the working copy).
@@ -152,7 +179,17 @@ def parse_options():
)
parser.add_argument("-r", "--revision", help="Git revision to get changes from")
parser.add_argument(
"-p", "--paths", nargs="+", default=["."], help="Lint only the given paths"
"-p",
"--paths",
nargs="+",
default=["."],
help="Lint only the given paths (recursively)",
)
parser.add_argument(
"-n",
"--no-recursive",
action="store_true",
help="If paths are supplied with -p/--paths, do not recurse into paths",
)
parser.add_argument(
"-s",
@@ -173,12 +210,15 @@ def parse_options():

def main():
options = parse_options()
paths = map(normalize_directory_path, options.paths)
if options.revision:
files = get_changed_files(options.revision, options.paths, options.verbose)
files = get_changed_files(options.revision, paths, options.verbose)
else:
files = get_all_files(options.paths)
files = get_all_files(paths)
if options.no_recursive:
files = remove_recursive_files(files, paths, options.verbose)
file_patterns = get_file_patterns(options.glob, options.regex)
files = filter_files(files, file_patterns)
files = filter_files(files, file_patterns, options.verbose)

# clang-tidy error's when it does not get input files.
if not files:
1 change: 1 addition & 0 deletions tools/cpp_build/build_caffe2.sh
@@ -24,6 +24,7 @@ cmake -DUSE_CUDA:BOOL=$USE_CUDA \
-DCMAKE_BUILD_TYPE:STRING=$BUILD_TYPE \
-DCMAKE_INSTALL_PREFIX:STRING=$INSTALL_PREFIX \
-DCMAKE_INSTALL_MESSAGE=NEVER \
-DCMAKE_EXPORT_COMPILE_COMMANDS:BOOL=ON \
-G "$GENERATE" \
$PYTORCHPATH/
$MAKE -j "$JOBS" install
1 change: 1 addition & 0 deletions tools/cpp_build/build_libtorch.sh
@@ -24,6 +24,7 @@ cmake -DUSE_CUDA:BOOL=$USE_CUDA \
-DCMAKE_INSTALL_MESSAGE=NEVER \
-Dnanopb_BUILD_GENERATOR:BOOL=OFF \
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON \
-DCMAKE_EXPORT_COMPILE_COMMANDS:BOOL=ON \
-DVERBOSE:BOOL=${VERBOSE:-0} \
-G "$GENERATE" \
$PYTORCHPATH/torch
2 changes: 1 addition & 1 deletion torch/csrc/api/include/torch/nn/cursor.h
@@ -48,7 +48,7 @@ class CursorBase {

/// A `(key, value)` pair exposed by cursor iterators.
struct Item {
Item(const std::string& key_, T& module_);
Item(const std::string& key_, T& value_);

T& operator*();
const T& operator*() const;
2 changes: 1 addition & 1 deletion torch/csrc/autograd/anomaly_mode.h
@@ -18,7 +18,7 @@ struct AnomalyMode {


struct AnomalyMetadata {
virtual ~AnomalyMetadata(){};
virtual ~AnomalyMetadata() = default;
virtual void store_stack() = 0;
virtual void print_stack() = 0;
};
8 changes: 4 additions & 4 deletions torch/csrc/autograd/engine.cpp
@@ -159,7 +159,7 @@ struct GraphTask {
std::unordered_map<Function*, ExecInfo> exec_info;
std::vector<Variable> captured_vars;

void init_to_execute(Function& graph_root, const edge_list& captures);
void init_to_execute(Function& graph_root, const edge_list& outputs);

// The value of worker_device in the thread that created this task.
// See Note [Reentrant backwards]
@@ -499,14 +499,14 @@ struct ClearCallbacks {
std::mutex& callbacks_lock;
};

auto Engine::execute(const edge_list& input_roots,
auto Engine::execute(const edge_list& roots,
const variable_list& inputs,
bool keep_graph,
bool create_graph,
const edge_list& outputs) -> variable_list {
std::call_once(start_threads_flag, &Engine::start_threads, this);

validate_outputs(input_roots, const_cast<variable_list&>(inputs), [](const std::string& msg) {
validate_outputs(roots, const_cast<variable_list&>(inputs), [](const std::string& msg) {
return msg;
});

@@ -517,7 +517,7 @@ auto Engine::execute(const edge_list& input_roots,
std::unique_lock<std::mutex> lock(graph_task.mutex);

// Now compute the dependencies for all executable functions and queue the root
auto graph_root = std::make_shared<GraphRoot>(input_roots, inputs);
auto graph_root = std::make_shared<GraphRoot>(roots, inputs);
compute_dependencies(graph_root.get(), graph_task);
if (!outputs.empty()) {
graph_task.init_to_execute(*graph_root, outputs);
2 changes: 1 addition & 1 deletion torch/csrc/autograd/engine.h
@@ -57,7 +57,7 @@ struct TORCH_API Engine {
ReadyQueue& ready_queue(int device);
void start_threads();
virtual void thread_init(int device);
virtual void thread_main(GraphTask *task);
virtual void thread_main(GraphTask *graph_task);
virtual void thread_on_exception(FunctionTask& task, std::exception& e);

std::once_flag start_threads_flag;
2 changes: 1 addition & 1 deletion torch/csrc/autograd/function.h
@@ -328,7 +328,7 @@ struct TORCH_API Function : std::enable_shared_from_this<Function> {
/// See Function::is_traceable() for definition.
struct TraceableFunction : public Function {
using Function::Function;
bool is_traceable() final override {
bool is_traceable() final {
return true;
}
};
4 changes: 2 additions & 2 deletions torch/csrc/autograd/function_hook.h
@@ -10,12 +10,12 @@ struct Variable;
using variable_list = std::vector<Variable>;

struct FunctionPreHook {
virtual ~FunctionPreHook() {}
virtual ~FunctionPreHook() = default;
virtual variable_list operator()(const variable_list& grads) = 0;
};

struct FunctionPostHook {
virtual ~FunctionPostHook() {}
virtual ~FunctionPostHook() = default;
virtual variable_list operator()(const variable_list& grad_input, const variable_list& grad_output) = 0;
};

4 changes: 2 additions & 2 deletions torch/csrc/autograd/functions/accumulate_grad.h
@@ -6,9 +6,9 @@
namespace torch { namespace autograd {

struct AccumulateGrad : public Function {
explicit AccumulateGrad(Variable variable);
explicit AccumulateGrad(Variable variable_);

variable_list apply(variable_list&& inputs) override;
variable_list apply(variable_list&& grads) override;

Variable variable;
};
2 changes: 1 addition & 1 deletion torch/csrc/autograd/functions/basic_ops.cpp
@@ -11,7 +11,7 @@

namespace torch { namespace autograd {

auto Error::apply(variable_list&& grad_outputs) -> variable_list {
auto Error::apply(variable_list&& inputs) -> variable_list {
throw std::runtime_error(msg);
}

9 changes: 6 additions & 3 deletions torch/csrc/autograd/functions/tensor.h
@@ -13,7 +13,7 @@
namespace torch { namespace autograd {

struct CopyBackwards : public Function {
variable_list apply(variable_list&& inputs) override;
variable_list apply(variable_list&& grads) override;

at::Type *src_type;
int32_t src_device = -1;
@@ -23,9 +23,12 @@ struct CopyBackwards : public Function {
// grad[idx] is defined by the relative sizes, strides, and offset of base and
// view.
struct CopySlices : public Function {
CopySlices(const Variable& base, at::TensorGeometry view, std::shared_ptr<Function> fn);
CopySlices(
const Variable& base_var,
at::TensorGeometry view_,
std::shared_ptr<Function> fn_);

variable_list apply(variable_list&& grads) override;
variable_list apply(variable_list&& inputs) override;
void release_variables() override;

at::TensorGeometry base;
4 changes: 2 additions & 2 deletions torch/csrc/autograd/input_buffer.h
@@ -22,14 +22,14 @@ struct InputBuffer {
InputBuffer& operator=(InputBuffer&& other) = default;

// Accumulates the variable at a specified index.
void add(size_t idx, Variable var);
void add(size_t pos, Variable var);

int device() const;

Variable operator[](size_t pos) { return buffer[pos]; }

// Returns the inputs as a list of variables. Destroys given InputBuffer.
static std::vector<Variable> variables(InputBuffer&& buffer);
static std::vector<Variable> variables(InputBuffer&& g);

private:
std::vector<Variable> buffer;
2 changes: 1 addition & 1 deletion torch/csrc/autograd/profiler.h
@@ -185,7 +185,7 @@ struct TORCH_API RecordFunction {
using thread_event_lists = std::vector<std::vector<Event>>;
// NOTE: changing profiler modes is **NOT THREAD SAFE**. You should ensure that
// there no autograd functions are being executed when these function are used.
TORCH_API void enableProfiler(ProfilerState state);
TORCH_API void enableProfiler(ProfilerState new_state);
TORCH_API thread_event_lists disableProfiler();

} // namespace profiler
8 changes: 4 additions & 4 deletions torch/csrc/autograd/saved_variable.h
@@ -45,10 +45,10 @@ class TORCH_API SavedVariable {
std::weak_ptr<Function> grad_accumulator_;
VariableVersion version_counter_;

uint32_t saved_version_;
uint32_t output_nr_;
uint32_t saved_version_ = 0;
uint32_t output_nr_ = 0;
bool was_default_constructed_ = true;
bool requires_grad_;
bool has_grad_fn_;
bool requires_grad_ = false;
bool has_grad_fn_ = false;
};
}} // namespace torch::autograd
2 changes: 1 addition & 1 deletion torch/csrc/autograd/variable.h
@@ -263,7 +263,7 @@ struct Variable::Impl : public at::TensorImpl {
TORCH_API explicit Impl(
at::Tensor data,
bool requires_grad = false,
Edge edge = Edge());
Edge gradient_edge = Edge());

~Impl() override;

4 changes: 2 additions & 2 deletions torch/csrc/jit/attributes.h
@@ -28,7 +28,7 @@ struct AttributeValue {
Symbol name;
virtual AttributeKind kind() const = 0;
virtual Ptr clone() const = 0;
virtual ~AttributeValue() {}
virtual ~AttributeValue() = default;
};

template<typename T, AttributeKind Kind>
@@ -101,7 +101,7 @@ struct AttributeError : public std::exception {
// we return Derived* pointers because Nodes are normally held as pointers.
template<typename Derived>
struct Attributes {
Attributes() {}
Attributes() = default;
void copyAttributes(const Attributes & rhs) {
values_.clear();
for(auto & i : rhs.values_) {
12 changes: 6 additions & 6 deletions torch/csrc/jit/autodiff.cpp
@@ -9,6 +9,7 @@
#include <torch/csrc/jit/assertions.h>

#include <algorithm>
#include <memory>

namespace torch { namespace jit {

@@ -564,14 +565,13 @@ static void lambdaLiftReverse(Gradient& grad_desc, ReverseDetails& rev_info) {
reverse_block->owningNode()->destroy();
}

Gradient differentiate(std::shared_ptr<Graph>& _graph, const std::vector<bool>& requires_grad) {
Gradient differentiate(std::shared_ptr<Graph>& graph, const std::vector<bool>& requires_grad) {
Gradient grad_desc;
// Take ownership of the graph
JIT_ASSERTM(
_graph.use_count() == 1,
"differentiate will mutate and destroy the graph, so it requires "
"graph.use_count() == 1, but found ", _graph.use_count());
std::swap(_graph, grad_desc.f);
JIT_ASSERTM(graph.use_count() == 1,
"differentiate will mutate and destroy the graph, so it requires "
"graph.use_count() == 1, but found %d", graph.use_count());
std::swap(graph, grad_desc.f);
// XXX: Take care when handling outputs - they can be duplicated!

WithInsertPoint guard(grad_desc.f->block());
2 changes: 2 additions & 0 deletions torch/csrc/jit/autodiff.h
@@ -4,7 +4,9 @@
#include "torch/csrc/jit/ir.h"

#include <ATen/ATen.h>

#include <vector>
#include <memory>

namespace torch { namespace jit {

2 changes: 1 addition & 1 deletion torch/csrc/jit/fusion_compiler.h
@@ -86,7 +86,7 @@ struct CompiledFusionFunction {
TH_DISALLOW_COPY_AND_ASSIGN(CompiledFusionFunction);

CompiledFusionFunction(const std::string & name, AnnotatedGraph & agraph);
virtual ~CompiledFusionFunction() {}
virtual ~CompiledFusionFunction() = default;

// expects outputs to be pre-allocated
void launch_with_tensors(at::ArrayRef<at::Tensor> inputs, at::ArrayRef<at::Tensor> outputs);