[JIT] Fix clang-tidy warnings in jit/python (pytorch#47985)
Summary: Pull Request resolved: pytorch#47985

Test Plan: Imported from OSS

Reviewed By: ZolotukhinM

Differential Revision: D25258644

Pulled By: SplitInfinity

fbshipit-source-id: dfc15dc62c148f79f4e99fd058a6bf2d071ccbb5
Meghan Lele authored and facebook-github-bot committed Dec 2, 2020
1 parent 8746e1a commit 18eccfb
Showing 9 changed files with 109 additions and 94 deletions.
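Most of the hunks below apply a handful of recurring clang-tidy fixes: performance-unnecessary-value-param (take expensive-to-copy arguments such as std::shared_ptr, py::object, py::tuple, and std::vector by const reference, or std::move them where ownership is actually transferred), modernize-loop-convert, and modernize-use-emplace. Copying a std::shared_ptr is not free: each copy is an atomic increment plus a later atomic decrement, which is pure overhead when the callee only reads through the pointer. The following standalone sketch uses illustrative names rather than the real PyTorch API and only shows the by-value / const-reference / move distinction the diff applies:

#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Graph {
  std::vector<std::string> nodes;
};

// Before: passing the shared_ptr by value copies it, which bumps and later
// drops the atomic reference count on every call.
size_t countNodesByValue(std::shared_ptr<Graph> g) {
  return g->nodes.size();
}

// After: a const reference observes the same Graph without touching the
// reference count, which is all a read-only function needs.
size_t countNodesByConstRef(const std::shared_ptr<Graph>& g) {
  return g->nodes.size();
}

// When the callee keeps the argument, take it by value and std::move it into
// place so only one copy is ever made.
struct GraphHolder {
  explicit GraphHolder(std::shared_ptr<Graph> g) : graph(std::move(g)) {}
  std::shared_ptr<Graph> graph;
};

int main() {
  auto g = std::make_shared<Graph>();
  g->nodes = {"aten::add", "aten::relu"};
  GraphHolder holder(g);
  return countNodesByValue(g) == countNodesByConstRef(holder.graph) ? 0 : 1;
}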
30 changes: 17 additions & 13 deletions torch/csrc/jit/python/init.cpp
@@ -348,7 +348,7 @@ void initJITBindings(PyObject* module) {
py::arg("prefix") = "top")
.def(
"_jit_pass_remove_inplace_ops",
[](std::shared_ptr<Graph> g) { return RemoveInplaceOps(g); })
[](const std::shared_ptr<Graph>& g) { return RemoveInplaceOps(g); })
.def("_jit_pass_constant_pooling", ConstantPooling)
.def(
"_jit_pass_create_functional_graphs",
@@ -378,7 +378,9 @@ void initJITBindings(PyObject* module) {
.def("_jit_pass_lint", LintGraph)
.def(
"_jit_pass_complete_shape_analysis",
[](std::shared_ptr<Graph> graph, py::tuple inputs, bool with_grad) {
[](const std::shared_ptr<Graph>& graph,
const py::tuple& inputs,
bool with_grad) {
ArgumentSpecCreator arg_spec_creator(*graph);
Stack stack;
stack.reserve(inputs.size()); // captures?
@@ -400,7 +402,7 @@
})
.def(
"_jit_interpret_graph",
[](std::shared_ptr<Graph>& graph, py::tuple inputs) {
[](std::shared_ptr<Graph>& graph, const py::tuple& inputs) {
Stack stack;
stack.reserve(inputs.size()); // captures?
for (auto& obj : inputs) {
@@ -441,7 +443,9 @@ void initJITBindings(PyObject* module) {
.def("_jit_pass_erase_shape_information", EraseShapeInformation)
.def(
"_jit_pass_create_autodiff_subgraphs",
[](std::shared_ptr<Graph> graph) { CreateAutodiffSubgraphs(graph); })
[](const std::shared_ptr<Graph>& graph) {
CreateAutodiffSubgraphs(graph);
})
#if defined(BUILDING_TESTS) && !defined(__HIP_PLATFORM_HCC__)
.def(
"_jit_run_cpp_tests",
@@ -469,7 +473,7 @@ void initJITBindings(PyObject* module) {
})
.def(
"_jit_unflatten",
[](autograd::variable_list vars, python::IODescriptor& desc) {
[](const autograd::variable_list& vars, python::IODescriptor& desc) {
return py::reinterpret_steal<py::object>(
python::unflatten(vars, desc));
})
@@ -494,8 +498,8 @@ void initJITBindings(PyObject* module) {
})
.def(
"_jit_check_alias_annotation",
[](std::shared_ptr<Graph> g,
py::tuple args,
[](const std::shared_ptr<Graph>& g,
const py::tuple& args,
const std::string& unqualified_op_name) {
auto stack = toTraceableStack(args);
checkAliasAnnotation(g, std::move(stack), unqualified_op_name);
@@ -553,7 +557,7 @@ void initJITBindings(PyObject* module) {
.def(
"_jit_try_infer_type",
[](py::object obj) -> TypePtr {
auto match = tryToInferType(obj);
auto match = tryToInferType(std::move(obj));
if (match.success()) {
return match.type();
}
@@ -647,7 +651,7 @@ void initJITBindings(PyObject* module) {
[](std::shared_ptr<Graph>& g) { return FuseTensorExprs(g); })
.def(
"_jit_fuser_get_fused_kernel_code",
[](Graph& g, std::vector<at::Tensor> inps) {
[](Graph& g, const std::vector<at::Tensor>& inps) {
return debugGetFusedKernelCode(g, inps);
})
.def(
@@ -945,7 +949,7 @@ void initJITBindings(PyObject* module) {
py::class_<PyTorchStreamReader>(m, "PyTorchFileReader")
.def(py::init<std::string>())
.def(py::init([](const py::object& buffer) {
auto adapter = std::make_unique<BufferAdapter>(std::move(buffer));
auto adapter = std::make_unique<BufferAdapter>(buffer);
return std::make_unique<PyTorchStreamReader>(std::move(adapter));
}))
.def(
@@ -1135,7 +1139,7 @@ void initJITBindings(PyObject* module) {
m.def("_jit_get_custom_class_schemas", customClassSchemasForBCCheck);
m.def("_jit_get_schemas_for_operator", [](const std::string& qualified_name) {
auto symbol = Symbol::fromQualString(qualified_name);
auto operations = getAllOperatorsFor(symbol);
const auto& operations = getAllOperatorsFor(symbol);
return fmap(operations, [](const std::shared_ptr<Operator>& op) {
return op->schema();
});
@@ -1281,8 +1285,8 @@ void initJITBindings(PyObject* module) {
});
});

m.def("_jit_assert_is_instance", [](py::object obj, TypePtr type) {
toIValue(obj, type);
m.def("_jit_assert_is_instance", [](py::object obj, const TypePtr& type) {
toIValue(std::move(obj), type);
});

initPythonCustomClassBindings(module);
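The init.cpp hunks above mostly change the signatures of binding lambdas. A minimal pybind11 sketch of the same shape, assuming pybind11 is available; Graph and node_count here are stand-ins for illustration, not the real torch.jit bindings:

#include <pybind11/pybind11.h>
#include <memory>

namespace py = pybind11;

// Stand-in for torch::jit::Graph, held by shared_ptr as in the real bindings.
struct Graph {
  int num_nodes = 0;
};

PYBIND11_MODULE(example, m) {
  py::class_<Graph, std::shared_ptr<Graph>>(m, "Graph").def(py::init<>());

  // Before: [](std::shared_ptr<Graph> g) { ... } copies the holder per call.
  // After: taking it by const reference, as the diff does for
  // _jit_pass_remove_inplace_ops and friends, skips the refcount traffic.
  m.def("node_count",
        [](const std::shared_ptr<Graph>& g) { return g->num_nodes; });
}

Because the class is registered with a std::shared_ptr holder, pybind11 can bind the argument as const std::shared_ptr<Graph>& just as well as by value, so the change is signature-only.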
45 changes: 24 additions & 21 deletions torch/csrc/jit/python/pybind_utils.h
@@ -280,8 +280,8 @@ inline TypedIValue toDictKeyIValue(py::handle key) {
}

inline c10::optional<TypePtr> unifyOrInitializeType(
TypePtr accum,
TypePtr unify) {
const TypePtr& accum,
const TypePtr& unify) {
if (!accum) {
return unify;
}
@@ -499,7 +499,7 @@ inline InferredType tryToInferContainerType(py::handle input) {
}
}

inline bool isTraceableType(TypePtr type) {
inline bool isTraceableType(const TypePtr& type) {
if (type->isSubtypeOf(TensorType::get())) {
return true;
}
@@ -512,7 +512,9 @@ inline bool isTraceableType(TypePtr type) {
return std::all_of(
tuple_type->elements().begin(),
tuple_type->elements().end(),
[](TypePtr element_type) { return isTraceableType(element_type); });
[](const TypePtr& element_type) {
return isTraceableType(element_type);
});
}

if (auto dict_type = type->cast<DictType>()) {
@@ -545,13 +547,13 @@ inline Stack toTraceableStack(const py::tuple& inputs) {
inline IValue createGenericList(py::handle obj, const TypePtr& elem_type) {
auto elems = c10::impl::GenericList(elem_type);
for (auto elem : obj) {
elems.push_back(toIValue(std::move(elem), elem_type));
elems.push_back(toIValue(elem, elem_type));
}
return IValue(std::move(elems));
}

inline IValue createGenericDict(
py::dict obj,
const py::dict& obj,
const TypePtr& key_type,
const TypePtr& value_type) {
c10::impl::GenericDict elems(key_type, value_type);
@@ -747,7 +749,7 @@ inline IValue toIValue(
"a TorchScript compatible type, did you forget to",
"turn it into a user defined TorchScript class?"));
}
res = toIValue(std::move(obj), classType);
res = toIValue(obj, classType);
}
// check if the classType conform with the interface or not
std::stringstream why_not;
@@ -1074,9 +1076,9 @@ inline Stack createStackForSchema(
push(stack, std::move(*self));
}
// First push all positional args.
for (size_t i = 0; i < args.size(); ++i) {
for (const auto& arg : args) {
// Use the type information from the schema to convert the PyObject.
push(stack, argumentToIValue(schema, stack.size(), args[i]));
push(stack, argumentToIValue(schema, stack.size(), arg));
}

// Now for every remaining non-positional argument in the schema, look for it
@@ -1153,15 +1155,16 @@ inline Stack evilDeprecatedBadCreateStackDoNotUse(
// tracing graph.
inline py::object runAndInsertCall(
Function& callee,
tuple_slice args,
py::kwargs kwargs,
const tuple_slice& args,
const py::kwargs& kwargs,
c10::optional<IValue> self,
// Lambda that tells this function how to insert `callee` into the graph if
// we're tracing.
std::function<Value*(Graph&, const MatchedSchema& match)> callInserter) {
auto stack = createStackForSchema(
callee.getSchema(), std::move(args), std::move(kwargs), std::move(self));
auto tracing_state = tracer::getTracingState();
const std::function<Value*(Graph&, const MatchedSchema& match)>&
callInserter) {
auto stack =
createStackForSchema(callee.getSchema(), args, kwargs, std::move(self));
const auto& tracing_state = tracer::getTracingState();
if (!tracing_state) {
pybind11::gil_scoped_release no_gil_guard;
// If we're not tracing, just run the callee as normal.
@@ -1211,8 +1214,8 @@ inline py::object runAndInsertCall(

inline py::object invokeScriptFunctionFromPython(
Function& callee,
tuple_slice args,
py::kwargs kwargs) {
const tuple_slice& args,
const py::kwargs& kwargs) {
return runAndInsertCall(
callee,
args,
@@ -1225,8 +1228,8 @@ inline py::object invokeScriptFunctionFromPython(

inline py::object invokeScriptMethodFromPython(
Method& callee,
tuple_slice args,
py::kwargs kwargs) {
const tuple_slice& args,
const py::kwargs& kwargs) {
auto self = callee.owner()._ivalue();
return runAndInsertCall(
callee.function(),
Expand All @@ -1241,14 +1244,14 @@ inline py::object invokeScriptMethodFromPython(
inline py::object invokeOperatorFromPython(
const std::vector<std::shared_ptr<Operator>>& operations,
py::args args,
py::kwargs kwargs) {
const py::kwargs& kwargs) {
Stack stack;

if (operations.size() == 1) {
const Operator& op = *operations.at(0);
// Create a stack full of the arguments and keyword arguments.
stack = createStackForSchema(
op.schema(), std::move(args), std::move(kwargs), c10::nullopt);
op.schema(), std::move(args), kwargs, c10::nullopt);

pybind11::gil_scoped_release no_gil_guard;
op.getOperation()(&stack);
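Besides the const-reference parameters, the pybind_utils.h hunks convert the indexed loop in createStackForSchema to a range-based for. A self-contained sketch of that rewrite, with a plain std::vector standing in for the pybind11 argument sequence:

#include <string>
#include <vector>

int main() {
  const std::vector<std::string> args = {"self", "other", "alpha"};
  std::vector<std::string> stack;

  // Before: for (size_t i = 0; i < args.size(); ++i) push(stack, ..., args[i]);
  // After: a range-based for binds each element by const reference, so there
  // is no index bookkeeping and no accidental copy of the loop variable.
  for (const auto& arg : args) {
    stack.push_back(arg);
  }
  return stack.size() == args.size() ? 0 : 1;
}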
2 changes: 1 addition & 1 deletion torch/csrc/jit/python/python_arg_flatten.cpp
@@ -100,7 +100,7 @@ py::object cast_dict(std::vector<py::object> objs) {
py::dict sequence = {};
for (size_t i = 0; i < num_objs; ++i) {
py::tuple obj = py::reinterpret_borrow<py::tuple>(objs[i]);
sequence[obj[0]] = std::move(obj[1]);
sequence[obj[0]] = obj[1];
}
return std::move(sequence);
}
31 changes: 17 additions & 14 deletions torch/csrc/jit/python/python_ir.cpp
@@ -16,6 +16,7 @@
#include <torch/csrc/utils/python_strings.h>
#include <iostream>
#include <sstream>
#include <utility>

namespace torch {
namespace jit {
@@ -216,12 +217,12 @@ void initPythonIRBindings(PyObject* module_) {
.def(
"dump_alias_db",
[](std::shared_ptr<Graph> g) {
AliasDb db(g);
AliasDb db(std::move(g));
db.dump();
})
.def(
"_export_onnx",
[](const std::shared_ptr<Graph> g,
[](const std::shared_ptr<Graph>& g,
const std::map<std::string, at::Tensor>& initializers,
int64_t onnx_opset_version,
const std::unordered_map<
@@ -282,7 +283,7 @@ void initPythonIRBindings(PyObject* module_) {
py::arg("onnx_file_path") = std::string())
.def(
"_pretty_print_onnx",
[](const std::shared_ptr<Graph> g,
[](const std::shared_ptr<Graph>& g,
const std::map<std::string, at::Tensor>& initializers,
int64_t onnx_opset_version,
bool defer_weight_export,
@@ -389,7 +390,7 @@ void initPythonIRBindings(PyObject* module_) {
.GS(prependNode)
.def(
"insertConstant",
[](Graph& g, IValue ival) { return g.insertConstant(ival); })
[](Graph& g, const IValue& ival) { return g.insertConstant(ival); })
.GS(lint)
.GS(insertNode);
#undef GS
@@ -587,7 +588,7 @@ void initPythonIRBindings(PyObject* module_) {
// Tensor (t_) -- manually written to unwrap the variable into a tensor.
.def(
"t_",
[](Node& n, const char* name, torch::autograd::Variable v) {
[](Node& n, const char* name, const torch::autograd::Variable& v) {
AT_ASSERT(!v.requires_grad());
return n.t_(Symbol::attr(name), v);
})
@@ -599,7 +600,7 @@
"ts_",
[](Node& n,
const char* name,
std::vector<torch::autograd::Variable> vs) {
const std::vector<torch::autograd::Variable>& vs) {
std::vector<at::Tensor> tensors;
tensors.reserve(vs.size());
for (auto& variable : vs) {
Expand All @@ -621,7 +622,7 @@ void initPythonIRBindings(PyObject* module_) {
})
.def(
"z_",
[](Node& n, const char* name, at::Tensor v) {
[](Node& n, const char* name, const at::Tensor& v) {
return n.t_(
Symbol::attr(name),
autograd::Variable(v.view({})).set_requires_grad(false));
@@ -729,7 +730,7 @@ void initPythonIRBindings(PyObject* module_) {
})
.def(
"isSubtypeOf",
[](std::shared_ptr<Type>& self, std::shared_ptr<Type> other) {
[](std::shared_ptr<Type>& self, std::shared_ptr<Type>& other) {
if (!other) {
return false;
}
@@ -767,8 +768,9 @@ void initPythonIRBindings(PyObject* module_) {
.def_static("get", &NoneType::get);

py::class_<TupleType, Type, std::shared_ptr<TupleType>>(m, "TupleType")
.def(
py::init([](std::vector<TypePtr> a) { return TupleType::create(a); }))
.def(py::init([](std::vector<TypePtr> a) {
return TupleType::create(std::move(a));
}))
.def("elements", [](TupleType& self) {
std::vector<TypePtr> types;
for (const auto& type : self.elements()) {
Expand All @@ -785,21 +787,22 @@ void initPythonIRBindings(PyObject* module_) {
.def("getElementType", &ListType::getElementType);
py::class_<DictType, Type, std::shared_ptr<DictType>>(m, "DictType")
.def(py::init([](TypePtr key, TypePtr value) {
return DictType::create(key, value);
return DictType::create(std::move(key), std::move(value));
}))
.def("getKeyType", &DictType::getKeyType)
.def("getValueType", &DictType::getValueType);
py::class_<OptionalType, Type, std::shared_ptr<OptionalType>>(
m, "OptionalType")
.def(py::init([](TypePtr a) { return OptionalType::create(a); }))
.def(py::init(
[](TypePtr a) { return OptionalType::create(std::move(a)); }))
.def_static("ofTensor", &OptionalType::ofTensor)
.def("getElementType", &OptionalType::getElementType);
py::class_<RRefType, Type, std::shared_ptr<RRefType>>(m, "RRefType")
.def(py::init([](TypePtr a) { return RRefType::create(a); }))
.def(py::init([](TypePtr a) { return RRefType::create(std::move(a)); }))
.def("getElementType", &RRefType::getElementType);

py::class_<FutureType, Type, std::shared_ptr<FutureType>>(m, "FutureType")
.def(py::init([](TypePtr a) { return FutureType::create(a); }))
.def(py::init([](TypePtr a) { return FutureType::create(std::move(a)); }))
.def("getElementType", &FutureType::getElementType);

py::class_<ClassType, Type, std::shared_ptr<ClassType>>(m, "ClassType")
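The python_ir.cpp hunks keep the by-value TypePtr parameters of the py::init factory lambdas but std::move them into create(), so the by-value argument's reference count is handed off instead of being bumped a second time (assuming create takes its argument by value, as in the simplified stand-in below; this is not the real c10 type hierarchy):

#include <memory>
#include <utility>

struct Type {};
using TypePtr = std::shared_ptr<Type>;

struct OptionalType {
  // Takes its element type by value precisely so callers can move into it.
  static std::shared_ptr<OptionalType> create(TypePtr elem) {
    auto t = std::make_shared<OptionalType>();
    t->elem_ = std::move(elem);
    return t;
  }
  TypePtr elem_;
};

int main() {
  // Before: [](TypePtr a) { return OptionalType::create(a); } copies `a` again.
  // After: moving the by-value parameter hands its refcount straight to create().
  auto factory = [](TypePtr a) { return OptionalType::create(std::move(a)); };
  auto opt = factory(std::make_shared<Type>());
  return opt->elem_ ? 0 : 1;
}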
8 changes: 4 additions & 4 deletions torch/csrc/jit/python/python_sugared_value.cpp
@@ -92,12 +92,12 @@ FunctionSchema PythonValue::getSchema(

auto types_it = arg_types.begin();
for (; types_it != arg_types.end(); ++types_it, ++names_it) {
args.push_back(Argument(
args.emplace_back(
/*name=*/*names_it,
/*type=*/std::move(*types_it),
/*N=*/c10::nullopt,
/*default_value=*/c10::nullopt,
/*kwarg_only=*/false));
/*kwarg_only=*/false);
}
rets.push_back(Argument("0", std::move(ret_type), {}, {}, false));
}
@@ -293,7 +293,7 @@ SugaredValuePtr ModuleValue::getitem(
void checkInterface(
const SourceRange& loc,
Function& m,
std::shared_ptr<ModuleValue> self,
const std::shared_ptr<ModuleValue>& self,
const std::string& field) {
if (self->asValue(loc, m)->type()->cast<InterfaceType>()) {
throw ErrorReport(loc)
@@ -307,7 +307,7 @@ void recurseThroughNestedModules(
Function& m,
std::vector<SugaredValuePtr>& keys,
std::vector<SugaredValuePtr>& values,
std::shared_ptr<ModuleValue> self,
std::shared_ptr<ModuleValue>& self,
const std::string& prefix,
const std::string& field) {
auto prefix_value =
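The python_sugared_value.cpp change swaps push_back(Argument(...)) for emplace_back(...) (modernize-use-emplace), constructing the element in place rather than via a temporary. A self-contained sketch with a simplified Argument, not c10's real one:

#include <string>
#include <utility>
#include <vector>

// Simplified stand-in for c10's Argument.
struct Argument {
  Argument(std::string name, int position)
      : name(std::move(name)), position(position) {}
  std::string name;
  int position;
};

int main() {
  std::vector<Argument> args;

  // Before: a temporary Argument is built, then moved into the vector.
  args.push_back(Argument("self", 0));

  // After: emplace_back forwards the constructor arguments and builds the
  // element directly inside the vector's storage.
  args.emplace_back("other", 1);

  return args.size() == 2 ? 0 : 1;
}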
(The diffs for the remaining four changed files did not load on this page.)
