
Commit

Bag of clang tidy fixes for torch/csrc/ and torch/csrc/autograd (pytorch#11050)

Summary:
Linting `torch/csrc/` (non-recursive) and `torch/csrc/autograd` (non-recursive).

Fixed things like:
- `typedef` vs `using`
- Use `.empty()` instead of comparing with an empty string or checking `.size() == 0`
- Use range for loops instead of old style loops (`modernize-`)
- Remove redundant `virtual` from functions already marked `override`
- Replace `stdint.h` with `cstdint`
- Replace `return Type(x, y)` with `return {x, y}`
- Use boolean literals (`true`/`false`) instead of numbers (`1`/`0`)
- More ...
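
For illustration, the kinds of rewrites these checks produce look roughly like the following hypothetical before/after-style snippet (not code from this diff):

```cpp
#include <cstdint>   // modernize-deprecated-headers: <cstdint> instead of <stdint.h>
#include <string>
#include <utility>
#include <vector>

// modernize-use-using: `using` alias instead of `typedef`.
using Callback = void (*)(int64_t);

struct Base {
  virtual ~Base() = default;
  virtual std::pair<int, int> bounds() const = 0;
};

struct Derived : Base {
  // `override` alone is sufficient; the redundant `virtual` is dropped.
  std::pair<int, int> bounds() const override {
    return {0, 10};  // brace-initialized return instead of `return std::pair<int, int>(0, 10)`
  }
};

int main() {
  std::vector<std::string> names{"a", "b"};
  bool verbose = false;  // boolean literal instead of `0`

  // `.empty()` instead of `.size() == 0`; range-for instead of an index loop.
  if (!names.empty()) {
    for (const auto& name : names) {
      verbose = verbose || !name.empty();
    }
  }
  return verbose ? 0 : 1;
}
```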

ezyang apaszke cpuhrsch
Pull Request resolved: pytorch#11050

Differential Revision: D9597505

Pulled By: goldsborough

fbshipit-source-id: cb0fb4793ade885a8dbf4b10484487b84c64c7f2
goldsborough authored and facebook-github-bot committed Sep 6, 2018
1 parent 83a1ab2 commit dccd0f2
Showing 31 changed files with 138 additions and 122 deletions.
4 changes: 4 additions & 0 deletions .clang-tidy
@@ -4,6 +4,7 @@ Checks: '
*
,clang-analyzer-*
,modernize-*
,-cert-dcl21-cpp
,-cert-err58-cpp
,-cert-err60-cpp
,-clang-diagnostic-*
@@ -12,10 +13,12 @@ Checks: '
,-cppcoreguidelines-pro-bounds-constant-array-index
,-cppcoreguidelines-pro-type-member-init
,-cppcoreguidelines-pro-type-static-cast-downcast
,-cppcoreguidelines-pro-type-union-access
,-cppcoreguidelines-pro-type-vararg
,-cppcoreguidelines-special-member-functions
,-fuchsia-*
,-google-build-using-namespace
,-google-default-arguments
,-google-explicit-constructor
,-google-readability-braces-around-statements
,-google-readability-namespace-comments
@@ -24,6 +27,7 @@ Checks: '
,-google-runtime-references
,-hicpp-braces-around-statements
,-hicpp-explicit-conversions
,-hicpp-member-init
,-hicpp-no-array-decay
,-hicpp-signed-bitwise
,-hicpp-special-member-functions
2 changes: 1 addition & 1 deletion aten/src/ATen/function_wrapper.py
@@ -93,7 +93,7 @@ def TypedDict(name, attrs, total=True): # type: ignore
return at::native::${api_name}(${type_method_actuals}, options());
}
""")
# 4. add virtual override to TypeDerived.h
# 4. add override to TypeDerived.h
TYPE_DERIVED_DECLARATION = CodeTemplate("""\
${return_type} ${method_prefix_derived}${api_name}(${type_method_formals}) const override;
""")
6 changes: 4 additions & 2 deletions test/cpp/api/optim.cpp
@@ -35,8 +35,7 @@ bool test_optimizer_xor(Options options) {
const int64_t kBatchSize = 4;
const int64_t kMaximumNumberOfEpochs = 3000;

auto optimizer = OptimizerClass(std::vector<torch::Tensor>(), options);
optimizer.add_parameters(model->parameters());
OptimizerClass optimizer(model->parameters(), options);

float running_loss = 1;
int epoch = 0;
@@ -152,6 +151,9 @@ TEST_CASE("Optim/BasicInterface") {
REQUIRE(optimizer.size() == 0);
optimizer.add_parameters(parameters);
REQUIRE(optimizer.size() == parameters.size());
for (size_t p = 0; p < parameters.size(); ++p) {
REQUIRE(optimizer.parameters()[p].allclose(parameters[p]));
}
}
{
Linear linear(3, 4);
2 changes: 1 addition & 1 deletion tools/autograd/gen_variable_type.py
@@ -80,7 +80,7 @@
}

METHOD_DECLARATION = CodeTemplate("""\
virtual ${return_type} ${method_prefix_derived}${api_name}(${type_method_formals}) const override;
${return_type} ${method_prefix_derived}${api_name}(${type_method_formals}) const override;
""")

METHOD_DEFINITION = CodeTemplate("""\
36 changes: 18 additions & 18 deletions tools/autograd/templates/VariableType.h
@@ -34,30 +34,30 @@ void register_variable_type_for(at::Type* baseType);

struct TORCH_API VariableType final : public at::TypeDefault {
VariableType(Context* context, at::Type* baseType);
virtual at::ScalarType scalarType() const override;
virtual at::Backend backend() const override;
virtual at::Allocator* allocator() const override;
virtual at::Device getDeviceFromPtr(void * data) const override;
virtual Storage storage(bool resizable = false) const override;
virtual Storage storage(size_t size, bool resizable = false) const override;
virtual Storage storageFromBlob(void * data, int64_t size, const std::function<void(void*)> & deleter) const override;
virtual Storage storageWithAllocator(int64_t size, at::Allocator* allocator) const override;
virtual std::unique_ptr<at::Generator> generator() const override;
virtual const char * toString() const override;
virtual at::TypeID ID() const override;
virtual size_t elementSizeInBytes() const override;
virtual at::Type & toBackend(at::Backend b) const override;
virtual at::Type & toScalarType(at::ScalarType s) const override;
virtual Storage unsafeStorageFromTH(void * th_pointer, bool retain) const override;
virtual at::Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const override;
at::ScalarType scalarType() const override;
at::Backend backend() const override;
at::Allocator* allocator() const override;
at::Device getDeviceFromPtr(void * data) const override;
Storage storage(bool resizable = false) const override;
Storage storage(size_t size, bool resizable = false) const override;
Storage storageFromBlob(void * data, int64_t size, const std::function<void(void*)> & deleter) const override;
Storage storageWithAllocator(int64_t size, at::Allocator* allocator) const override;
std::unique_ptr<at::Generator> generator() const override;
const char * toString() const override;
at::TypeID ID() const override;
size_t elementSizeInBytes() const override;
at::Type & toBackend(at::Backend b) const override;
at::Type & toScalarType(at::ScalarType s) const override;
Storage unsafeStorageFromTH(void * th_pointer, bool retain) const override;
at::Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const override;

static at::Type* getVariableTypeFromBaseType(const at::Type& baseType);
static bool isVariableType(const at::Type& type);
static std::vector<at::Type*> allCUDATypes();
static std::vector<at::Type*> allCPUTypes();

virtual Tensor & s_copy_(Tensor & self, const Tensor & src, bool non_blocking) const override;
virtual Tensor & _s_copy_from(const Tensor & self, Tensor & dst, bool non_blocking) const override;
Tensor & s_copy_(Tensor & self, const Tensor & src, bool non_blocking) const override;
Tensor & _s_copy_from(const Tensor & self, Tensor & dst, bool non_blocking) const override;
${type_derived_method_declarations}

private:
2 changes: 1 addition & 1 deletion tools/clang_tidy.py
@@ -210,7 +210,7 @@ def parse_options():

def main():
options = parse_options()
paths = map(normalize_directory_path, options.paths)
paths = list(map(normalize_directory_path, options.paths))
if options.revision:
files = get_changed_files(options.revision, paths, options.verbose)
else:
2 changes: 1 addition & 1 deletion torch/csrc/Size.h
@@ -2,7 +2,7 @@

#include "torch/csrc/python_headers.h"
#include "torch/csrc/autograd/variable.h"
#include "stdint.h"
#include "cstdint"

extern PyTypeObject THPSizeType;

2 changes: 1 addition & 1 deletion torch/csrc/Types.h
@@ -5,7 +5,7 @@
#include <TH/TH.h>

#ifndef INT64_MAX
#include "stdint.h"
#include "cstdint"
#endif

template <typename T> struct THPTypeInfo {};
4 changes: 3 additions & 1 deletion torch/csrc/api/include/torch/detail/ordered_dict.h
@@ -72,7 +72,9 @@ class OrderedDict {

// Move works by default, because you can move-construct vectors of const
// values..
OrderedDict(OrderedDict&& other) = default;
OrderedDict(OrderedDict&& other) noexcept(
noexcept(std::unordered_map<Key, size_t>()) &&
noexcept(std::vector<Item>())) = default;
OrderedDict& operator=(OrderedDict&& other) = default;

~OrderedDict() = default;
44 changes: 22 additions & 22 deletions torch/csrc/api/include/torch/nn/pimpl.h
@@ -155,31 +155,31 @@ class ModuleHolder : torch::detail::ModuleHolderIndicator {
} // namespace nn
} // namespace torch

#define TORCH_ARG(T, name) \
auto name(const T& new_##name)->decltype(*this) { \
this->name##_ = new_##name; \
return *this; \
} \
auto name(T&& new_##name)->decltype(*this) { \
this->name##_ = std::move(new_##name); \
return *this; \
} \
const T& name() const noexcept { \
return this->name##_; \
} \
T name##_
#define TORCH_ARG(T, name) \
auto name(const T& new_##name)->decltype(*this) { /* NOLINT */ \
this->name##_ = new_##name; \
return *this; \
} \
auto name(T&& new_##name)->decltype(*this) { /* NOLINT */ \
this->name##_ = std::move(new_##name); \
return *this; \
} \
const T& name() const noexcept { /* NOLINT */ \
return this->name##_; \
} \
T name##_ /* NOLINT */

/// Defines a class `Name` which inherits from `nn::ModuleHolder` to provide a
/// wrapper over a `std::shared_ptr<Impl>`.
#define TORCH_MODULE_IMPL(Name, Impl) \
class Name : public torch::nn::ModuleHolder<Impl> { \
public: \
using torch::nn::ModuleHolder<Impl>::ModuleHolder; \
Name(const Name&) = default; \
Name(Name&&) = default; \
Name(Name& other) : Name(static_cast<const Name&>(other)) {} \
Name& operator=(const Name&) = default; \
Name& operator=(Name&&) = default; \
#define TORCH_MODULE_IMPL(Name, Impl) \
class Name : public torch::nn::ModuleHolder<Impl> { /* NOLINT */ \
public: \
using torch::nn::ModuleHolder<Impl>::ModuleHolder; \
Name(const Name&) = default; /* NOLINT */ \
Name(Name&&) = default; /* NOLINT */ \
Name(Name& other) : Name(static_cast<const Name&>(other)) {} /* NOLINT */ \
Name& operator=(const Name&) = default; /* NOLINT */ \
Name& operator=(Name&&) = default; /* NOLINT */ \
}

/// Like `TORCH_MODULE_IMPL`, but defaults the `Impl` name to `<Name>Impl`.
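Aside (illustrative, not code from this diff): clang-tidy cannot be silenced per macro expansion, which is why the hunks above embed `/* NOLINT */` markers inside the macro bodies so every expansion carries its own suppression. A minimal sketch of the technique with a hypothetical `DEFINE_FIELD` macro:

```cpp
#include <utility>

// Each /* NOLINT */ travels with the macro so clang-tidy skips the generated
// code at every expansion site.
#define DEFINE_FIELD(T, name)                                       \
  T& name() noexcept { /* NOLINT */ return name##_; }               \
  void name(T value) { /* NOLINT */ name##_ = std::move(value); }   \
  T name##_{} /* NOLINT */

struct Config {
  DEFINE_FIELD(int, batch_size);
  DEFINE_FIELD(double, learning_rate);
};

int main() {
  Config c;
  c.batch_size(32);
  c.learning_rate(0.01);
  return c.batch_size() == 32 ? 0 : 1;
}
```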
13 changes: 6 additions & 7 deletions torch/csrc/api/include/torch/optim/optimizer.h
@@ -31,21 +31,20 @@ class OptimizerBase {
virtual ~OptimizerBase() = default;

/// Adds the given vector of parameters to the optimizer's parameter list.
/// Override this method if you want to modify the way parameters are added to
/// the `Optimizer`.
virtual void add_parameters(const std::vector<Tensor>& parameters);
void add_parameters(const std::vector<Tensor>& parameters);

/// Adds the `ParameterCursor`'s parameters to the optimizer's parameter list.
/// NOTE: Calls the `vector<Tensor>` overload of `add_parameters` -- override
/// that method if you want to modify the behavior of `add_parameters`.
virtual void add_parameters(const ParameterCursor& cursor);
void add_parameters(const ParameterCursor& cursor);

/// Zeros out the gradients of all parameters.
virtual void zero_grad();

/// Provides a reference to the parameters this optimizer holds.
/// Provides a const reference to the parameters this optimizer holds.
const std::vector<Tensor>& parameters() const noexcept;

/// Provides a reference to the parameters this optimizer holds.
std::vector<Tensor>& parameters() noexcept;

/// Returns the number of parameters referenced by the optimizer.
size_t size() const noexcept;

8 changes: 8 additions & 0 deletions torch/csrc/api/src/optim/optimizer.cpp
@@ -36,6 +36,14 @@ void OptimizerBase::zero_grad() {
}
}

const std::vector<Tensor>& OptimizerBase::parameters() const noexcept {
return parameters_;
}

std::vector<Tensor>& OptimizerBase::parameters() noexcept {
return parameters_;
}

size_t OptimizerBase::size() const noexcept {
return parameters_.size();
}
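Aside (a minimal standalone sketch with a hypothetical `Holder` class, not `OptimizerBase`): the const/non-const overload pair added above gives read-only access through const references and in-place mutation otherwise:

```cpp
#include <iostream>
#include <vector>

class Holder {
 public:
  const std::vector<int>& values() const noexcept { return values_; }
  std::vector<int>& values() noexcept { return values_; }

 private:
  std::vector<int> values_{1, 2, 3};
};

int main() {
  Holder h;
  h.values().push_back(4);                    // non-const overload: mutation allowed
  const Holder& view = h;
  std::cout << view.values().size() << "\n";  // const overload: read-only access
  return 0;
}
```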
2 changes: 1 addition & 1 deletion torch/csrc/autograd/anomaly_mode.cpp
@@ -2,6 +2,6 @@

namespace torch { namespace autograd {

bool AnomalyMode::_enabled = 0;
bool AnomalyMode::_enabled = false;

}}
20 changes: 10 additions & 10 deletions torch/csrc/autograd/engine.cpp
@@ -63,7 +63,7 @@ struct FunctionTask {

FunctionTask(GraphTask* base, std::shared_ptr<Function> fn, InputBuffer inputs)
: base(base)
, fn(fn)
, fn(std::move(fn))
, inputs(std::move(inputs)) {}
};

@@ -170,15 +170,10 @@ struct GraphTask {
}

GraphTask(bool keep_graph, bool grad_mode)
: exception()
, has_error(false)
: has_error(false)
, outstanding_tasks(0)
, keep_graph(keep_graph)
, grad_mode(grad_mode)
, mutex()
, not_done()
, not_ready()
, dependencies()
, owner(NO_DEVICE) {}
};

@@ -194,12 +189,12 @@ auto ReadyQueue::push(FunctionTask item) -> void {
auto ReadyQueue::pop() -> FunctionTask {
std::unique_lock<std::mutex> lock(mutex);
not_empty.wait(lock, [this]{ return !heap.empty(); });
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
auto task = std::move(const_cast<FunctionTask&>(heap.top())); heap.pop();
return task;
}

Engine::Engine() : ready_queues() {
}
Engine::Engine() = default;

// This Engine's ReadyQueues and their corresponding threads are leaked here
Engine::~Engine() = default;
@@ -376,6 +371,7 @@ static variable_list call_function(FunctionTask& task) {
checkpoint_valid = prev_checkpoint_valid_state;

if(has_post_hooks){
// NOLINTNEXTLINE(bugprone-use-after-move)
return call_post_hooks(fn, std::move(outputs), std::move(inputs));
}
return outputs;
@@ -478,7 +474,7 @@ auto Engine::compute_dependencies(Function* root, GraphTask& task) -> void {
// Queue contains all nodes that will start propagating gradients.
// We no longer have to expand functions that don't require grad.
auto& dependencies = task.dependencies;
while (queue.size() > 0) {
while (!queue.empty()) {
auto fn = queue.back(); queue.pop_back();
for (const auto& edge : fn->next_edges()) {
if (auto next_ptr = edge.function.get()) {
@@ -513,6 +509,7 @@ auto Engine::execute(const edge_list& roots,
const edge_list& outputs) -> variable_list {
std::call_once(start_threads_flag, &Engine::start_threads, this);

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
validate_outputs(roots, const_cast<variable_list&>(inputs), [](const std::string& msg) {
return msg;
});
@@ -559,6 +556,9 @@ auto Engine::execute(const edge_list& roots,
// more callbacks (or they can be registered from other threads
// while it's waiting.
std::unique_lock<std::mutex> cb_lock(post_callbacks_lock);
// WARNING: Don't use a range-for loop here because more callbacks may be
// added in between callback calls, so iterators may become invalidated.
// NOLINTNEXTLINE(modernize-loop-convert)
for (size_t i = 0; i < final_callbacks.size(); ++i) {
cb_lock.unlock();
final_callbacks[i]();
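Aside (a standalone sketch, not code from this PR): the index-based callback loop kept in the hunk above, shown with a hypothetical callback list that grows while it is being drained; the callback is copied out before the call because a `push_back` during the call may reallocate the vector:

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

int main() {
  std::vector<std::function<void()>> callbacks;
  callbacks.push_back([&callbacks] {
    std::cout << "first callback, registering another\n";
    callbacks.push_back([] { std::cout << "callback added mid-iteration\n"; });
  });

  // NOLINTNEXTLINE(modernize-loop-convert): the vector grows during iteration,
  // so size() is re-read each pass and a range-for's iterators would be unsafe.
  for (std::size_t i = 0; i < callbacks.size(); ++i) {
    auto callback = callbacks[i];  // copy before calling; the call may reallocate
    callback();
  }
  return 0;
}
```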
2 changes: 1 addition & 1 deletion torch/csrc/autograd/engine.h
@@ -67,7 +67,7 @@ struct TORCH_API Engine {
};

// allow python_engine to override the default engine when it loads
typedef Engine& (*EngineStub)(void);
using EngineStub = Engine& (*)();
TORCH_API void set_default_engine_stub(EngineStub stub);

}} // namespace torch::autograd
4 changes: 2 additions & 2 deletions torch/csrc/autograd/function.cpp
@@ -114,14 +114,14 @@ void deleteFunction(Function* function) {

delete function;

if (deleteFunctionQueue.size() == 0) {
if (deleteFunctionQueue.empty()) {
return;
}
if (recursion_depth.value() != kDeleteFunctionMaxRecursionDepth) {
AT_ERROR("Only one deleter per thread should be able to process "
"the delete queue. Please open an issue.");
}
while (deleteFunctionQueue.size() > 0) {
while (!deleteFunctionQueue.empty()) {
auto queued_function = deleteFunctionQueue.front();
deleteFunctionQueue.pop_front();
delete queued_function;
2 changes: 1 addition & 1 deletion torch/csrc/autograd/grad_mode.cpp
@@ -2,7 +2,7 @@

namespace torch { namespace autograd {

thread_local bool GradMode_enabled = 1;
thread_local bool GradMode_enabled = true;

bool GradMode::is_enabled() {
return GradMode_enabled;
2 changes: 1 addition & 1 deletion torch/csrc/byte_order.h
@@ -1,7 +1,7 @@
#ifndef THP_BYTE_ORDER_H
#define THP_BYTE_ORDER_H

#include <stdint.h>
#include <cstdint>
#include <stddef.h>
#include <THHalf.h>

(Remaining file diffs not shown.)

