
Enable cppcoreguidelines checks in clang-tidy (pytorch#12959)
Summary:
Enables most of the `cppcoreguidelines-*` checks for clang-tidy. Major fixes included:

- Uninitialized members,
- Use of `const_cast`,
- Use of raw `new` (see the illustrative sketch below).
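
In isolation, the three fix patterns look roughly like the sketch below. This is illustrative code only, using a made-up `Widget` type and `std::make_unique`; the actual diff uses PyTorch types and the `torch::make_unique` helper:

```cpp
#include <cstdint>
#include <memory>

// cppcoreguidelines-pro-type-member-init: give members in-class initializers
// instead of leaving them indeterminate.
struct Widget {
  int64_t count = 0;
  Widget* parent = nullptr;
};

// cppcoreguidelines-pro-type-const-cast: provide const and non-const overloads
// rather than casting constness away inside a single function.
const Widget& checked(const Widget& w) { return w; }
Widget& checked(Widget& w) { return w; }

// Raw `new` handed to a smart pointer is replaced with a make_unique call.
std::unique_ptr<Widget> make_widget() {
  return std::make_unique<Widget>();  // instead of std::unique_ptr<Widget>(new Widget())
}
```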

ezyang apaszke
Pull Request resolved: pytorch#12959

Differential Revision: D11349285

Pulled By: goldsborough

fbshipit-source-id: 9e24d643787dfe7ede69f96223c8c0179bd1b2d6
goldsborough authored and facebook-github-bot committed Oct 30, 2018
1 parent 8260441 commit 6071389
Showing 17 changed files with 63 additions and 31 deletions.
12 changes: 11 additions & 1 deletion .clang-tidy
@@ -1,10 +1,20 @@
---
# NOTE: there must be no spaces before the '-', so put the comma first.
# NOTE there must be no spaces before the '-', so put the comma first.
Checks: '
-*
,modernize-deprecated-headers
,bugprone-*
,-bugprone-macro-parentheses
,-bugprone-forward-declaration-namespace
,cppcoreguidelines-*
,-cppcoreguidelines-pro-bounds-array-to-pointer-decay
,-cppcoreguidelines-pro-type-static-cast-downcast
,-cppcoreguidelines-pro-bounds-pointer-arithmetic
,-cppcoreguidelines-pro-bounds-constant-array-index
,-cppcoreguidelines-pro-type-reinterpret-cast
,-cppcoreguidelines-special-member-functions
,-cppcoreguidelines-interfaces-global-init
,-cppcoreguidelines-owning-memory
'
WarningsAsErrors: '*'
HeaderFilterRegex: 'torch/csrc/.*'
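
Because `WarningsAsErrors: '*'` turns every enabled diagnostic into a hard error, the few remaining violations in the diffs below are silenced inline with `NOLINT`/`NOLINTNEXTLINE` comments. A minimal sketch of the two suppression forms (hypothetical code, not taken from this PR):

```cpp
void example(const int* q) {
  // Same-line suppression: the check name goes in parentheses after NOLINT.
  int* p = const_cast<int*>(q); // NOLINT(cppcoreguidelines-pro-type-const-cast)

  // Next-line suppression: applies to the line that follows.
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
  int* r = const_cast<int*>(q);
  (void)p;
  (void)r;
}
```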
2 changes: 2 additions & 0 deletions tools/autograd/gen_autograd_functions.py
@@ -146,6 +146,8 @@ def save_arg(arg, is_output):
unpack.append('auto {} = unpack_list({}_);'.format(name, name))
elif arg['type'] == 'IntList':
saved_variables.append('std::vector<int64_t> {};'.format(name))
elif arg['type'] == 'int64_t':
saved_variables.append('{} {} = 0;'.format(arg['type'], name))
else:
saved_variables.append('{} {};'.format(arg['type'], name))

6 changes: 4 additions & 2 deletions tools/autograd/templates/VariableType.h
@@ -70,8 +70,10 @@ struct TORCH_API VariableType final : public at::TypeDefault {

private:
// checks that t is actually a Variable
static Variable & checked_cast_variable(const Tensor & t, const char * name, int pos);
static at::Tensor & unpack(const Tensor & t, const char * name, int pos);
static const Variable & checked_cast_variable(const Tensor & t, const char * name, int pos);
static Variable & checked_cast_variable(Tensor & t, const char * name, int pos);
static at::Tensor & unpack(Tensor & t, const char * name, int pos);
static const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
static at::SparseTensorRef unpack(SparseTensorRef t, const char * name, int pos);
static at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
static std::vector<at::Tensor> unpack(at::TensorList tl, const char *name, int pos);
26 changes: 21 additions & 5 deletions torch/csrc/autograd/VariableTypeManual.cpp
@@ -1,6 +1,8 @@
#include "c10/util/Optional.h"
#include "torch/csrc/autograd/VariableTypeUtils.h"

#include <torch/csrc/utils/memory.h>

using namespace at;
using namespace torch::autograd::generated;

@@ -75,7 +77,7 @@ void register_variable_type_for(TypeExtendedInterface* baseType) {
if(type_to_variable_type.size() <= base_id) {
type_to_variable_type.resize(base_id + 1);
}
type_to_variable_type[base_id].reset(new VariableType(&at::globalContext(), baseType));
type_to_variable_type[base_id] = torch::make_unique<VariableType>(&at::globalContext(), baseType);
}

struct VariableTypeRegistry {
@@ -174,17 +176,31 @@ std::vector<at::Type*> VariableType::allCUDATypes() {
return allTypesForBackends({ Backend::CUDA, Backend::SparseCUDA });
}

Variable & VariableType::checked_cast_variable(const Tensor & t, const char * name, int pos) {
const Variable & VariableType::checked_cast_variable(const Tensor & t, const char * name, int pos) {
if (!t.defined()) {
AT_ERROR("Expected a Tensor of type Variable but found an undefined Tensor for argument #", pos, " '", name, "'");
}
if (!isVariableType(t.type())) {
AT_ERROR("Expected object of type Variable but found type ", t.type().toString(), " for argument #", pos, " '", name, "'");
}
return as_variable_ref(t);
}

Variable & VariableType::checked_cast_variable(Tensor & t, const char * name, int pos) {
if (!t.defined()) {
AT_ERROR("Expected a Tensor of type Variable but found an undefined Tensor for argument #", pos, " '", name, "'");
}
if (!isVariableType(t.type())) {
AT_ERROR("Expected object of type Variable but found type ", t.type().toString(), " for argument #", pos, " '", name, "'");
}
return as_variable_ref(const_cast<Tensor&>(t));
return as_variable_ref(t);
}

const Tensor & VariableType::unpack(const Tensor & t, const char * name, int pos) {
return checked_cast_variable(t, name, pos).data();
}

Tensor & VariableType::unpack(const Tensor & t, const char * name, int pos) {
Tensor & VariableType::unpack(Tensor & t, const char * name, int pos) {
return checked_cast_variable(t, name, pos).data();
}

@@ -310,7 +326,7 @@ Tensor VariableType::detach(const Tensor & self) const {

}
// <NON_GENERATED_CODE>
auto result = as_variable_ref(const_cast<Tensor&>(self)).detach();
auto result = as_variable_ref(const_cast<Tensor&>(self)).detach(); // NOLINT(cppcoreguidelines-pro-type-const-cast)
// </NON_GENERATED_CODE>
if (jit::tracer::isTracing()) {
jit::tracer::addOutput(node, result);
4 changes: 2 additions & 2 deletions torch/csrc/autograd/VariableTypeUtils.h
@@ -65,13 +65,13 @@ inline void rebase_history(Variable& var, std::shared_ptr<Function> grad_fn) {
}
}

inline void rebase_history(ArrayRef<Variable> vars, std::shared_ptr<Function> grad_fn) {
inline void rebase_history(std::vector<Variable>&& vars, std::shared_ptr<Function> grad_fn) {
if (grad_fn) {
for (auto& var : vars) {
if (var.defined()) {
// TODO: eliminate const_cast
auto output_nr = grad_fn->add_input_metadata(var);
const_cast<Variable&>(var).rebase_history({grad_fn, output_nr});
var.rebase_history({grad_fn, output_nr});
} else {
grad_fn->add_input_metadata(Function::undefined_input());
}
2 changes: 1 addition & 1 deletion torch/csrc/autograd/functions/tensor.h
@@ -15,7 +15,7 @@ namespace torch { namespace autograd {
struct CopyBackwards : public Function {
variable_list apply(variable_list&& grads) override;

at::Type *src_type;
at::Type *src_type = nullptr; // initialized for safety.
int32_t src_device = -1;
};

4 changes: 2 additions & 2 deletions torch/csrc/autograd/profiler.h
@@ -42,7 +42,7 @@ inline int64_t getTime() {
return duration_cast<nanoseconds>(clock::now().time_since_epoch()).count();
#else
// clock_gettime is *much* faster than std::chrono implementation on Linux
struct timespec t;
struct timespec t{};
clock_gettime(CLOCK_MONOTONIC, &t);
return static_cast<int64_t>(t.tv_sec) * 1000000000 + static_cast<int64_t>(t.tv_nsec);
#endif
@@ -123,7 +123,7 @@ struct Event final {
return device_;
}
private:
int64_t cpu_ns_; // signed to allow for negative intervals
int64_t cpu_ns_ = 0; // signed to allow for negative intervals, initialized for safety.
// std::string is a very large object (usually around 32B),
// and this field is used only for user-created ranges, so
// it's better to save on size of Events.
2 changes: 1 addition & 1 deletion torch/csrc/jit/autodiff.h
@@ -45,7 +45,7 @@ struct Gradient {
// Describes how to construct outputs of f from what its graph will return.
// This is necessary because some trailing outputs are intermediates produced
// only to be saved for df (and should be ignored).
size_t f_real_outputs;
size_t f_real_outputs = 0; // initialized for safety.

// df inputs are split into two sections: vjps (aka grad_outputs) and captures.
// VJPs are "seeds" for the gradient computation given for each input capture
7 changes: 4 additions & 3 deletions torch/csrc/jit/dynamic_dag.h
@@ -7,6 +7,7 @@
#include <vector>

#include "torch/csrc/utils/functional.h"
#include "torch/csrc/utils/memory.h"

namespace torch { namespace jit { namespace detail {

@@ -187,7 +188,7 @@ size_t DynamicDAG<T>::debugNumVertices() const {

template <typename T>
Vertex<T>* DynamicDAG<T>::newVertex(T datum) {
vertices_.emplace_back(new Vertex<T>(vertices_.size(), datum));
vertices_.push_back(torch::make_unique<Vertex<T>>(vertices_.size(), datum));
return vertices_.back().get();
}

@@ -332,15 +333,15 @@ void DynamicDAG<T>::addEdge(Vertex<T>* producer, Vertex<T>* consumer) {
// Search for vertices that are reachable from consumer that have a now incorrect
// topological ordering.
if (dfsSearch(DFSDirection::forward, consumer, producer,
/*upper_bound=*/producer->ord, deltaF)) {
/*bound=*/producer->ord, deltaF)) {
// Path found! This means there's a cycle.
AT_ERROR("Cycle detected while trying to add edge.");
}

// Search for vertices that can reach producer that have a now incorrect
// topological ordering
JIT_ASSERT(!dfsSearch(DFSDirection::backward, producer, consumer,
/*lower_bound=*/consumer->ord, deltaB));
/*bound=*/consumer->ord, deltaB));

// Reorder the vertices that are reachable from consumer to occur BEFORE
// the vertices that can reach producer.
6 changes: 3 additions & 3 deletions torch/csrc/jit/fusers/common/fused_kernel.h
@@ -43,7 +43,7 @@ struct FusedKernel {
void launch(
at::ArrayRef<at::Tensor> inputs
, std::vector<at::Tensor>& outputs);

const std::vector<TensorDesc>& outputDescriptors() const {
return output_desc;
}
@@ -64,7 +64,7 @@ struct FusedKernel {
virtual void launch_raw(uint32_t numel, void** arguments) = 0;

virtual uint64_t get_rand_offset(uint32_t numel) = 0;
bool has_random;
bool has_random = false;
std::string name;
// We keep these around for debugging
std::string compilation_unit;
@@ -82,7 +82,7 @@ struct FusedKernel {
std::vector<PartitionDesc> chunk_desc;
};

} // namespace jit
} // namespace jit
} // namespace torch

#endif // USE_CPU_FUSER || USE_CUDA_FUSER
2 changes: 1 addition & 1 deletion torch/csrc/jit/graph_executor.h
@@ -20,7 +20,7 @@ struct ExecutionPlanState {
};

struct GraphExecutorState {
const Graph* graph;
const Graph* graph = nullptr;
ExecutionPlanState fallback; // XXX: members of this field are optional
std::unordered_map<ArgumentSpec, ExecutionPlanState> execution_plans;
};
6 changes: 4 additions & 2 deletions torch/csrc/jit/graph_node_list.h
@@ -47,8 +47,10 @@ struct generic_graph_node_list_iterator {
: cur(nullptr), d(kNextDirection) {}
generic_graph_node_list_iterator(T * cur, int d)
: cur(cur), d(d) {}
generic_graph_node_list_iterator(const generic_graph_node_list_iterator & rhs)
: cur(rhs.cur), d(rhs.d) {}
generic_graph_node_list_iterator(const generic_graph_node_list_iterator & rhs) = default;
generic_graph_node_list_iterator(generic_graph_node_list_iterator && rhs) = default;
generic_graph_node_list_iterator& operator=(const generic_graph_node_list_iterator & rhs) = default;
generic_graph_node_list_iterator& operator=(generic_graph_node_list_iterator && rhs) = default;
T * operator*() const { return cur; }
T * operator->() const { return cur; }
generic_graph_node_list_iterator & operator++() {
1 change: 1 addition & 0 deletions torch/csrc/jit/ir.cpp
@@ -242,6 +242,7 @@ void Node::lint() const {
size_t i = 0;
for (auto input : inputs_) {
// WARNING: O(n^2)
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
JIT_ASSERT(std::find(ALL_OF(input->uses_), Use(const_cast<Node*>(this), i)) != input->uses_.end());
JIT_ASSERT(graph_->all_nodes.count(this) == 1);
i++;
2 changes: 1 addition & 1 deletion torch/csrc/jit/named_value.h
@@ -68,7 +68,7 @@ struct NamedValue {
private:
c10::optional<SourceRange> loc_;
c10::optional<std::string> name_;
Value* value_;
Value* value_{nullptr};
// only valid if value_ == nullptr;
IValue ivalue_;
};
8 changes: 3 additions & 5 deletions torch/csrc/jit/passes/batch_mm.cpp
@@ -72,9 +72,7 @@ static constexpr size_t min_fusion_size = 2;

static std::array<int64_t, 2> as_array(at::IntList sizes) {
JIT_ASSERT(sizes.size() == 2);
std::array<int64_t, 2> arr;
arr[0] = sizes[0];
arr[1] = sizes[1];
std::array<int64_t, 2> arr = {sizes[0], sizes[1]};
return arr;
}

@@ -85,8 +83,8 @@ static std::array<int64_t, 2> as_array(at::IntList sizes) {
// and build a larger tree.
struct TreeToken {
uint64_t tree_size = 0; // NOTE: measured in number of leaves i.e. mm ops
std::array<int64_t, 2> lhs_sizes;
std::array<int64_t, 2> rhs_sizes;
std::array<int64_t, 2> lhs_sizes{{0, 0}};
std::array<int64_t, 2> rhs_sizes{{0, 0}};
Node *node = nullptr;
bool is_root = false;

2 changes: 1 addition & 1 deletion torch/csrc/jit/script/lexer.h
@@ -9,7 +9,7 @@
#include "torch/csrc/jit/assertions.h"
#include "torch/csrc/jit/source_range.h"
#include <torch/csrc/utils/memory.h>
#include <locale.h>
#include <clocale>

namespace torch {
namespace jit {
2 changes: 1 addition & 1 deletion torch/csrc/jit/script/module.h
@@ -153,7 +153,7 @@ struct Method {
}

Method& setSchema(FunctionSchema schema_) {
schema.reset(new FunctionSchema(std::move(schema_)));
schema = torch::make_unique<FunctionSchema>(std::move(schema_));
return *this;
}

