Add modernize-* checks to clang-tidy (pytorch#13196)
Summary:
Enables almost all `modernize-*` checks in clang-tidy. These warn against patterns such as the following (see the sketch after the list):

- Taking `const std::string&` parameters instead of new-style by-value `std::string` + `std::move`,
- Writing old-style index loops where range-for loops would do,
- Using raw `new` where `make_unique` is available,
- Using `push_back` where `emplace_back` constructs in place,
- Combining `virtual` with `override` (`override` alone is sufficient).
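
A short before/after sketch of these rewrites, using hypothetical `Shape`/`Circle` types rather than code from this PR:

```cpp
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Shape {
  virtual ~Shape() = default;
  virtual std::string label() const = 0;
};

struct Circle final : Shape {
  // Before: Circle(const std::string& label) : label_(label) {}
  // After:  take by value and std::move, so rvalue arguments are moved, not copied.
  explicit Circle(std::string label) : label_(std::move(label)) {}

  // Before: virtual std::string label() const override { ... }
  // After:  drop the redundant `virtual`; `override` already implies it.
  std::string label() const override { return label_; }

 private:
  std::string label_;
};

int main() {
  const std::vector<std::string> labels = {"a", "b", "c"};
  std::vector<std::unique_ptr<Shape>> shapes;
  // Before: for (size_t i = 0; i < labels.size(); ++i)
  //           shapes.push_back(std::unique_ptr<Shape>(new Circle(labels[i])));
  // After:  range-for, emplace_back, and make_unique instead of raw `new`.
  for (const auto& label : labels) {
    shapes.emplace_back(std::make_unique<Circle>(label));
  }
  return 0;
}
```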

ezyang
Pull Request resolved: pytorch#13196

Differential Revision: D12891837

Pulled By: goldsborough

fbshipit-source-id: 4d0f782a09eb391ee718d3d66f74c095ee121c09
goldsborough authored and facebook-github-bot committed Nov 3, 2018
1 parent 4bca51e commit 0479517
Showing 34 changed files with 176 additions and 158 deletions.
5 changes: 4 additions & 1 deletion .clang-tidy
@@ -2,7 +2,6 @@
 # NOTE there must be no spaces before the '-', so put the comma first.
 Checks: '
 -*
-,modernize-deprecated-headers
 ,bugprone-*
 ,-bugprone-macro-parentheses
 ,-bugprone-forward-declaration-namespace
@@ -18,6 +17,10 @@ Checks: '
 ,hicpp-signed-bitwise
 ,hicpp-exception-baseclass
 ,hicpp-avoid-goto
+,modernize-*
+,-modernize-use-default-member-init
+,-modernize-return-braced-init-list
+,-modernize-use-auto
 '
 WarningsAsErrors: '*'
 HeaderFilterRegex: 'torch/csrc/.*'
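
Three `modernize-*` checks are explicitly disabled above. For context, here is a hypothetical snippet (not from this repository) showing the kind of code each excluded check would flag; presumably these particular rewrites were not wanted here:

```cpp
#include <string>
#include <utility>
#include <vector>

struct Config {
  // modernize-use-default-member-init would prefer `int retries = 3;`
  // together with `Config() = default;`.
  Config() : retries(3) {}
  int retries;
};

std::pair<int, int> make_range(int lo, int hi) {
  // modernize-return-braced-init-list would prefer `return {lo, hi};`.
  return std::pair<int, int>(lo, hi);
}

int sum(const std::vector<int>& v) {
  int total = 0;
  // modernize-use-auto would prefer `auto it = v.begin();`.
  for (std::vector<int>::const_iterator it = v.begin(); it != v.end(); ++it) {
    total += *it;
  }
  return total;
}
```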
2 changes: 1 addition & 1 deletion tools/autograd/templates/VariableType.h
@@ -34,7 +34,7 @@ using c10::optional;
 struct TORCH_API VariableType final : public at::TypeDefault {
   VariableType(Context* context, at::TypeExtendedInterface* baseType);
   at::ScalarType scalarType() const override;
-  virtual caffe2::TypeMeta typeMeta() const override;
+  caffe2::TypeMeta typeMeta() const override;
   at::Backend backend() const override;
   at::Allocator* allocator() const override;
   at::Device getDeviceFromPtr(void * data) const override;
2 changes: 1 addition & 1 deletion torch/csrc/api/include/torch/nn/cloneable.h
@@ -86,7 +86,7 @@ class Cloneable : public virtual Module {
   }
 
  private:
-  void clone_(Module& other, optional<Device> device) final override {
+  void clone_(Module& other, optional<Device> device) final {
     // Here we are *pretty* certain that `other's` type is `Derived` (because it
     // was registered under the same name as `this`), but you never know what
     // crazy things `reset()` does, so `dynamic_cast` just to be safe.
2 changes: 1 addition & 1 deletion torch/csrc/api/include/torch/nn/cursor.h
@@ -51,7 +51,7 @@ class CursorBase {
 
   /// A `(key, value)` pair exposed by cursor iterators.
   struct Item {
-    Item(const std::string& key_, T& value_);
+    Item(std::string key_, T& value_);
 
     T& operator*();
     const T& operator*() const;
4 changes: 2 additions & 2 deletions torch/csrc/api/src/nn/cursor.cpp
@@ -15,8 +15,8 @@ namespace detail {
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CursorBase::Item ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 template <typename T>
-CursorBase<T>::Item::Item(const std::string& key_, T& value_)
-    : key(key_), value(value_) {}
+CursorBase<T>::Item::Item(std::string key_, T& value_)
+    : key(std::move(key_)), value(value_) {}
 
 template <typename T>
 T& CursorBase<T>::Item::operator*() {
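
The by-value-plus-move signature above is the point of the pass-by-value rewrite: a `const std::string&` parameter forces the member initializer to copy, while a by-value parameter is copied only for lvalue arguments and moved for rvalues. A self-contained sketch (illustrative, not PR code):

```cpp
#include <string>
#include <utility>

struct Item {
  // Copy for lvalue arguments, move for rvalue arguments.
  explicit Item(std::string key) : key_(std::move(key)) {}
  std::string key_;
};

int main() {
  std::string k = "weight";
  Item a(k);            // one copy into the parameter, then one cheap move
  Item b(std::move(k)); // two moves, no copy at all
  return 0;
}
```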
2 changes: 1 addition & 1 deletion torch/csrc/api/src/nn/modules/rnn.cpp
@@ -34,7 +34,7 @@ RNNImplBase<Derived>::RNNImplBase(
     int64_t number_of_gates)
     : options(options_),
       number_of_gates_(number_of_gates),
-      cudnn_mode_(cudnn_mode) {
+      cudnn_mode_(std::move(cudnn_mode)) {
   reset();
 }
 
6 changes: 4 additions & 2 deletions torch/csrc/autograd/VariableTypeManual.cpp
@@ -1,5 +1,6 @@
 #include "c10/util/Optional.h"
 #include "torch/csrc/autograd/VariableTypeUtils.h"
+#include "torch/csrc/utils/memory.h"
 
 #include <torch/csrc/utils/memory.h>
 
@@ -73,11 +74,12 @@ std::vector<std::unique_ptr<Type>> type_to_variable_type;
 // XXX - this is not threadsafe with uses of Variables
 void register_variable_type_for(TypeExtendedInterface* baseType) {
   AT_ASSERT(baseType);
-  size_t base_id = static_cast<size_t>(baseType->ID());
+  const auto base_id = static_cast<size_t>(baseType->ID());
   if(type_to_variable_type.size() <= base_id) {
     type_to_variable_type.resize(base_id + 1);
   }
-  type_to_variable_type[base_id] = torch::make_unique<VariableType>(&at::globalContext(), baseType);
+  type_to_variable_type[base_id] =
+      make_unique<VariableType>(&at::globalContext(), baseType);
 }
 
 struct VariableTypeRegistry {
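
`make_unique` here comes from `torch/csrc/utils/memory.h`, a C++11 stand-in for C++14's `std::make_unique`. The usual shape of such a helper, sketched for reference (the header's exact contents may differ):

```cpp
#include <memory>
#include <utility>

namespace torch {

// Forward the constructor arguments and wrap the result in a unique_ptr,
// so ownership is established in a single expression with no raw `new`
// at the call site.
template <typename T, typename... Args>
std::unique_ptr<T> make_unique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

} // namespace torch
```

At call sites this turns `ptr.reset(new T(...))` into `ptr = make_unique<T>(...)`, keeping allocation and ownership transfer together.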
3 changes: 2 additions & 1 deletion torch/csrc/autograd/engine.cpp
@@ -5,6 +5,7 @@
 #include "torch/csrc/autograd/grad_mode.h"
 #include "torch/csrc/autograd/anomaly_mode.h"
 #include "torch/csrc/autograd/variable.h"
+#include "torch/csrc/utils/memory.h"
 
 #include <ATen/DeviceGuard.h>
 #include <ATen/ExpandUtils.h>
@@ -634,7 +635,7 @@ void GraphTask::init_to_execute(Function& graph_root, const edge_list& outputs)
   Function *output = output_edge.function.get();
   auto & info = exec_info[output];
   if (!info.captures)
-    info.captures.reset(new std::vector<ExecInfo::Capture>());
+    info.captures = make_unique<std::vector<ExecInfo::Capture>>();
   info.captures->emplace_back(output_edge.input_nr, output_idx++);
 }
 captured_vars.resize(output_idx);
6 changes: 3 additions & 3 deletions torch/csrc/jit/argument_spec.h
@@ -63,8 +63,8 @@ struct ArgumentSpec {
     hash_code = num_flat_inputs;
     args.resize(num_flat_inputs);
     size_t offset = 0;
-    for (size_t i = 0; i < inputs.size(); ++i) {
-      addInput(inputs[i], offset, with_grad);
+    for (const auto& i : inputs) {
+      addInput(i, offset, with_grad);
     }
     JIT_ASSERT(offset == num_flat_inputs);
   }
@@ -192,7 +192,7 @@ struct CompleteArgumentSpec {
     data.resize(ninputs + all_dims*2);
 
     // and reinterpret our data array as these structs
-    CompleteArgumentInfoPOD * pods = reinterpret_cast<CompleteArgumentInfoPOD*>(data.data());
+    auto* pods = reinterpret_cast<CompleteArgumentInfoPOD*>(data.data());
     int64_t * next_dim = sizes_strides();
     int32_t total_dims = 0;
     for(int32_t i = 0; i < num_inputs; i++) {
9 changes: 5 additions & 4 deletions torch/csrc/jit/autodiff.cpp
@@ -308,8 +308,9 @@ static std::vector<Value*> gradientForNode(Node* node, ArrayRef<Value*> grad_val
         squeezed_dims.push_back(i);
       }
       SymbolicVariable returned_grad = grads.at(0);
-      for (auto it = squeezed_dims.begin(); it != squeezed_dims.end(); ++it)
-        returned_grad = returned_grad.unsqueeze(*it);
+      for (const auto& dim : squeezed_dims) {
+        returned_grad = returned_grad.unsqueeze(dim);
+      }
       return {returned_grad};
 
     } else if (node->matches("aten::squeeze(Tensor self, int dim) -> Tensor", /*const_inputs=*/attr::dim)) {
@@ -333,7 +334,7 @@ static std::vector<Value*> gradientForNode(Node* node, ArrayRef<Value*> grad_val
       // of equal sizes. We can use a single split operation to handle that.
       if (std::all_of(tensor_inputs.begin(), tensor_inputs.end(), has_first_sizes)) {
         auto tensor_grads = grads.at(0).chunk(tensor_inputs.size(), dim);
-        tensor_grads.push_back(nullptr); // for attr::dim
+        tensor_grads.emplace_back(nullptr); // for attr::dim
         return tensor_grads;
       } else {
         size_t offset = 0;
@@ -343,7 +344,7 @@ static std::vector<Value*> gradientForNode(Node* node, ArrayRef<Value*> grad_val
           tensor_grads.push_back(grad.narrow(dim, offset, input.sizes()[dim]));
           offset += input.sizes()[dim];
         }
-        tensor_grads.push_back(nullptr); // for attr::dim
+        tensor_grads.emplace_back(nullptr); // for attr::dim
         return tensor_grads;
       }
     } else if (comparison_ops.find(node)) {
4 changes: 2 additions & 2 deletions torch/csrc/jit/code_template.h
@@ -96,8 +96,8 @@ struct TemplateEnv {
 # if this list is not empty and ${foo,} will insert one after.
 */
 struct CodeTemplate {
-  /* implicit */ CodeTemplate(const std::string & t)
-      : template_text(t) {}
+  /* implicit */ CodeTemplate(std::string t)
+      : template_text(std::move(t)) {}
 
   std::string format(const TemplateEnv & env) {
     std::stringstream out;
8 changes: 4 additions & 4 deletions torch/csrc/jit/export.cpp
@@ -371,7 +371,7 @@ class GraphEncoder: public EncoderBase {
   }
 
  private:
-  virtual void EncodeTensor(
+  void EncodeTensor(
       onnx::TensorProto* tensor_proto,
       const at::Tensor& tensor,
       const c10::optional<std::string> external_ref = {}) override;
@@ -450,15 +450,15 @@ class ModuleEncoder: public EncoderBase {
       script::Method &method,
       const std::string prefix);
 
-  virtual void EncodeTensor(
+  void EncodeTensor(
       onnx::TensorProto* tensor_proto,
       const at::Tensor& tensor,
       const c10::optional<std::string> external_ref = {}) override;
 
-  virtual void EncodeIntermediateValueInfo(onnx::GraphProto *graph_proto,
+  void EncodeIntermediateValueInfo(onnx::GraphProto *graph_proto,
       const Value* n) override;
 
-  virtual void EncodeValueInfo(onnx::GraphProto *graph_proto,
+  void EncodeValueInfo(onnx::GraphProto *graph_proto,
       onnx::ValueInfoProto* v,
       const Value* n) override;
 
28 changes: 18 additions & 10 deletions torch/csrc/jit/fuser/cpu/fused_kernel.cpp
@@ -4,6 +4,7 @@
 #include "torch/csrc/jit/code_template.h"
 #include "torch/csrc/jit/fuser/cpu/temp_file.h"
 #include "torch/csrc/jit/fuser/cpu/dynamic_library.h"
+#include "torch/csrc/utils/memory.h"
 
 #include <sstream>
 #include <cstdlib>
@@ -37,7 +38,7 @@ struct CompilerConfig {
     if (!programExists(cxx)) {
      cxx = "";
     }
 
     const char* debug_env = getenv("PYTORCH_FUSION_DEBUG");
     debug = debug_env && atoi(debug_env) != 0;
   }
@@ -101,22 +102,29 @@ static void disas(const std::string& so_file) {
 }
 
 FusedKernelCPU::FusedKernelCPU(
-    const std::string& _name
-    , const std::string& _code
-    , const std::vector<TensorDesc> _input_desc
-    , const std::vector<TensorDesc> _output_desc
-    , const std::vector<PartitionDesc> _chunk_desc
-    , const std::vector<PartitionDesc> _concat_desc
-    , const bool _has_random)
-    : FusedKernel{_name, _code, _input_desc, _output_desc, _chunk_desc, _concat_desc, _has_random} {
+    std::string name,
+    std::string code,
+    std::vector<TensorDesc> input_desc,
+    std::vector<TensorDesc> output_desc,
+    std::vector<PartitionDesc> chunk_desc,
+    std::vector<PartitionDesc> concat_desc,
+    bool has_random)
+    : FusedKernel(
+          std::move(name),
+          std::move(code),
+          std::move(input_desc),
+          std::move(output_desc),
+          std::move(chunk_desc),
+          std::move(concat_desc),
+          has_random) {
   auto& config = getConfig();
   TempFile so_file(so_template, 3);
   TempFile cpp_file(cpp_template, 4);
   cpp_file.write(code_);
   cpp_file.sync();
   runCompiler(cpp_file.name(), so_file.name());
   if (config.debug) disas(so_file.name());
-  so_lib.reset(new DynamicLibrary(so_file.name().c_str()));
+  so_lib = make_unique<DynamicLibrary>(so_file.name().c_str());
 #pragma GCC diagnostic ignored "-Wpedantic"
   kernel = reinterpret_cast<void(*)(uint32_t, void**)>(so_lib->sym(name_.c_str()));
 #pragma GCC diagnostic pop
25 changes: 12 additions & 13 deletions torch/csrc/jit/fuser/cpu/fused_kernel.h
@@ -17,21 +17,20 @@ namespace torch { namespace jit { namespace fuser { namespace cpu {
 // Represents a compiled CPU kernel and the metadata necessary to run it
 struct TORCH_API FusedKernelCPU : public ::torch::jit::fuser::FusedKernel {
   FusedKernelCPU(
-      const std::string& _name
-      , const std::string& _code
-      , const std::vector<TensorDesc> _input_desc
-      , const std::vector<TensorDesc> _output_desc
-      , const std::vector<PartitionDesc> _chunk_desc
-      , const std::vector<PartitionDesc> _concat_desc
-      , const bool _has_random);
-
-  virtual at::Backend backend() const override {
+      std::string name,
+      std::string code,
+      std::vector<TensorDesc> input_desc,
+      std::vector<TensorDesc> output_desc,
+      std::vector<PartitionDesc> chunk_desc,
+      std::vector<PartitionDesc> concat_desc,
+      bool has_random);
+
+  at::Backend backend() const override {
     return at::Backend::CPU;
   }
 
-  virtual void launch_raw(
-      const uint32_t numel
-      , std::vector<void*>& arguments) const override {
+  void launch_raw(const uint32_t numel, std::vector<void*>& arguments)
+      const override {
     kernel(numel, arguments.data());
   }
 
@@ -42,7 +41,7 @@ struct TORCH_API FusedKernelCPU : public ::torch::jit::fuser::FusedKernel {
 
 } // namespace cpu
 } // namespace fuser
-} // namespace jit
+} // namespace jit
 } // namespace torch
 
 #endif // USE_CPU_FUSER
27 changes: 17 additions & 10 deletions torch/csrc/jit/fuser/cuda/fused_kernel.cpp
@@ -67,16 +67,23 @@ static void getMajorMinor(const cudaDeviceProp* const prop, int& major, int& min
 
 // Compiles the specified kernel and stores the metadata required to run it
 FusedKernelCUDA::FusedKernelCUDA(
-    const int16_t _device
-    , const std::string& _name
-    , const std::string& _code
-    , const std::vector<TensorDesc> _input_desc
-    , const std::vector<TensorDesc> _output_desc
-    , const std::vector<PartitionDesc> _chunk_desc
-    , const std::vector<PartitionDesc> _concat_desc
-    , const bool _has_random)
-    : FusedKernel{_name, _code, _input_desc, _output_desc, _chunk_desc, _concat_desc, _has_random}
-    , device_{_device} {
+    int16_t device,
+    std::string name,
+    std::string code,
+    std::vector<TensorDesc> input_desc,
+    std::vector<TensorDesc> output_desc,
+    std::vector<PartitionDesc> chunk_desc,
+    std::vector<PartitionDesc> concat_desc,
+    bool has_random)
+    : FusedKernel(
+          std::move(name),
+          std::move(code),
+          std::move(input_desc),
+          std::move(output_desc),
+          std::move(chunk_desc),
+          std::move(concat_desc),
+          has_random),
+      device_(device) {
   // Initializes driver's API context (if necessary)
   CUcontext pctx = 0;
   TORCH_CU_CHECK(cuCtxGetCurrent(&pctx));
27 changes: 13 additions & 14 deletions torch/csrc/jit/fuser/cuda/fused_kernel.h
@@ -20,24 +20,23 @@ namespace torch { namespace jit { namespace fuser { namespace cuda {
 // Note: CUDA functions are per device.
 struct TORCH_API FusedKernelCUDA : public ::torch::jit::fuser::FusedKernel {
   FusedKernelCUDA(
-      const int16_t _device
-      , const std::string& _name
-      , const std::string& _code
-      , const std::vector<TensorDesc> _input_desc
-      , const std::vector<TensorDesc> _output_desc
-      , const std::vector<PartitionDesc> _chunk_desc
-      , const std::vector<PartitionDesc> _concat_desc
-      , const bool _has_random);
-
-  virtual ~FusedKernelCUDA() override {
+      int16_t device,
+      std::string name,
+      std::string code,
+      std::vector<TensorDesc> input_desc,
+      std::vector<TensorDesc> output_desc,
+      std::vector<PartitionDesc> chunk_desc,
+      std::vector<PartitionDesc> concat_desc,
+      bool has_random);
+
+  ~FusedKernelCUDA() override {
    cuModuleUnload(module_);
   }
 
-  virtual void launch_raw(
-      const uint32_t numel
-      , std::vector<void*>& arguments) const override;
+  void launch_raw(const uint32_t numel, std::vector<void*>& arguments)
+      const override;
 
-  virtual at::Backend backend() const override {
+  at::Backend backend() const override {
     return at::Backend::CUDA;
   }
 