Skip to content

Commit

Permalink
Add misc checks and turn some functions and variables into static (onnx#6401)
Browse files Browse the repository at this point in the history

### Description
<!-- - Describe your changes. -->

### Motivation and Context
For better code.
<!-- - Why is this change required? What problem does it solve? -->
<!-- - If it fixes an open issue, please link to the issue here. -->

---------

Signed-off-by: cyy <cyyever@outlook.com>
  • Loading branch information
cyyever authored Oct 7, 2024
1 parent 41cba9d commit 8b1209b
Show file tree
Hide file tree
Showing 23 changed files with 120 additions and 146 deletions.
6 changes: 6 additions & 0 deletions .clang-tidy
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,12 @@ Checks: >-
google-default-arguments,
google-global-names-in-headers,
google-explicit-constructor,
misc-*,
-misc-const-correctness,
-misc-include-cleaner,
-misc-no-recursion,
-misc-non-private-member-variables-in-classes,
-misc-use-anonymous-namespace,
modernize-*,
-modernize-concat-nested-namespaces,
-modernize-raw-string-literal,
Expand Down
22 changes: 10 additions & 12 deletions onnx/checker.cc
Original file line number Diff line number Diff line change
Expand Up @@ -310,10 +310,8 @@ void check_map(const MapProto& map, const CheckerContext& ctx) {
// Check that the index data stored in a SparseTensorProto is valid.
// indices: a 1-dimensional tensor; indices[i] represents the
// linearized index value for the i-th nonzero value.
void check_sparse_tensor_indices_1(
const TensorProto& indices,
const SparseTensorProto& sparse_tensor_proto,
size_t nnz) {
static void
check_sparse_tensor_indices_1(const TensorProto& indices, const SparseTensorProto& sparse_tensor_proto, size_t nnz) {
int dense_rank = sparse_tensor_proto.dims_size();
int64_t dense_size = 1;
for (int i = 0; i < dense_rank; ++i)
Expand Down Expand Up @@ -350,10 +348,8 @@ void check_sparse_tensor_indices_1(
// Check that the index data stored in a SparseTensorProto is valid.
// indices: a 2-dimensional tensor; indices[i,j] represents the j-th
// index value for the i-th nonzero value.
void check_sparse_tensor_indices_2(
const TensorProto& indices,
const SparseTensorProto& sparse_tensor_proto,
size_t nnz) {
static void
check_sparse_tensor_indices_2(const TensorProto& indices, const SparseTensorProto& sparse_tensor_proto, size_t nnz) {
int dense_rank = sparse_tensor_proto.dims_size();
if (static_cast<size_t>(indices.dims(0)) != nnz) {
fail_check("Sparse tensor indices (", indices.name(), ") first dimension size does not equal NNZ.");
Expand Down Expand Up @@ -523,7 +519,7 @@ void check_attribute(const AttributeProto& attr, const CheckerContext& ctx, cons
}
}

void print_warning_if_has_experimental(const std::unordered_set<std::string>& used_experimental_ops) {
static void print_warning_if_has_experimental(const std::unordered_set<std::string>& used_experimental_ops) {
if (!used_experimental_ops.empty()) {
std::string all_experimental_ops;
for (const auto& op : used_experimental_ops) {
Expand Down Expand Up @@ -716,7 +712,9 @@ void check_graph(const GraphProto& graph, const CheckerContext& ctx, const Lexic

// Utility function to get the imported version of domain from opset imports
// Returns -1 if requested domain is not found in the opset_imports
int get_version_for_domain(const std::string& domain, const std::unordered_map<std::string, int>& opset_imports) {
static int get_version_for_domain(
const std::string& domain,
const std::unordered_map<std::string, int>& opset_imports) {
auto it = opset_imports.find(domain);
if (it == opset_imports.end()) {
return -1;
Expand Down Expand Up @@ -890,7 +888,7 @@ void check_function(const FunctionProto& function, const CheckerContext& ctx, co
print_warning_if_has_experimental(used_experimental_ops);
}

void check_model(const ModelProto& model, CheckerContext& ctx) {
static void check_model(const ModelProto& model, CheckerContext& ctx) {
if (!model.ir_version()) {
fail_check("The model does not have an ir_version set properly.");
}
Expand Down Expand Up @@ -1062,7 +1060,7 @@ std::string resolve_external_data_location(
#endif
}

std::unordered_set<std::string> experimental_ops = {
static std::unordered_set<std::string> experimental_ops = {
"ATen",
"Affine",
"ConstantFill",
Expand Down
2 changes: 1 addition & 1 deletion onnx/checker.h
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ class CheckerContext final {
check_custom_domain_ = value;
}

explicit CheckerContext() {}
explicit CheckerContext() = default;

private:
int ir_version_{-1};
Expand Down
24 changes: 12 additions & 12 deletions onnx/common/ir_pb_converter.cc
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,9 @@
namespace ONNX_NAMESPACE {

// Part 1: convert ONNX Protobuf to IR
std::unique_ptr<Graph> graphProtoToGraph(const GraphProto& gp, bool nested, const int ir_version = IR_VERSION);
static std::unique_ptr<Graph> graphProtoToGraph(const GraphProto& gp, bool nested, const int ir_version = IR_VERSION);

Tensor tensorProtoToTensor(const ONNX_NAMESPACE::TensorProto& tp) {
static Tensor tensorProtoToTensor(const ONNX_NAMESPACE::TensorProto& tp) {
Tensor ret;

ret.sizes().reserve(tp.dims_size());
Expand Down Expand Up @@ -108,7 +108,7 @@ Tensor tensorProtoToTensor(const ONNX_NAMESPACE::TensorProto& tp) {
return ret;
}

void convertAttribute(const ONNX_NAMESPACE::AttributeProto& ap, Node* n, const int ir_version = IR_VERSION) {
static void convertAttribute(const ONNX_NAMESPACE::AttributeProto& ap, Node* n, const int ir_version = IR_VERSION) {
Symbol sym = Symbol(ap.name());
switch (ap.type()) {
case ONNX_NAMESPACE::AttributeProto_AttributeType_FLOAT:
Expand Down Expand Up @@ -193,13 +193,13 @@ void convertAttribute(const ONNX_NAMESPACE::AttributeProto& ap, Node* n, const i
}
}

void convertAttributes(ONNX_NAMESPACE::NodeProto& np, Node* n, const int ir_version = IR_VERSION) {
static void convertAttributes(ONNX_NAMESPACE::NodeProto& np, Node* n, const int ir_version = IR_VERSION) {
for (int i = 0; i < np.attribute_size(); i++) {
convertAttribute(np.attribute(i), n, ir_version);
}
}

std::vector<Dimension> tensorShapeProtoToDimensions(const ONNX_NAMESPACE::TensorShapeProto& tsp) {
static std::vector<Dimension> tensorShapeProtoToDimensions(const ONNX_NAMESPACE::TensorShapeProto& tsp) {
std::vector<Dimension> dims;
dims.reserve(tsp.dim_size());
for (int i = 0; i < tsp.dim_size(); i++) {
Expand All @@ -216,7 +216,7 @@ std::vector<Dimension> tensorShapeProtoToDimensions(const ONNX_NAMESPACE::Tensor
return dims;
}

void createDummyValue(
static void createDummyValue(
std::unique_ptr<Graph>& g,
const std::string& name,
std::unordered_map<std::string, Value*>& value_by_name_of) {
Expand Down Expand Up @@ -406,13 +406,13 @@ std::unique_ptr<Graph> ImportModelProto(const ModelProto& mp) {
}

// Part 2: convert IR to ONNX Protobuf
std::string value_name(Value* n) {
static std::string value_name(Value* n) {
return n->uniqueName();
}

void encodeGraph(GraphProto* p_g, const std::shared_ptr<Graph>& g);
static void encodeGraph(GraphProto* p_g, const std::shared_ptr<Graph>& g);

void encodeTensor(ONNX_NAMESPACE::TensorProto* p, const Tensor& tensor) {
static void encodeTensor(ONNX_NAMESPACE::TensorProto* p, const Tensor& tensor) {
if (tensor.hasName()) {
p->set_name(tensor.name());
}
Expand Down Expand Up @@ -481,7 +481,7 @@ void encodeTensor(ONNX_NAMESPACE::TensorProto* p, const Tensor& tensor) {
}
}

void addAttribute(ONNX_NAMESPACE::NodeProto* n_p, Node* n, Symbol name) {
static void addAttribute(ONNX_NAMESPACE::NodeProto* n_p, Node* n, Symbol name) {
auto attr = n_p->add_attribute();
attr->set_name(name.toString());
switch (n->kindOf(name)) {
Expand Down Expand Up @@ -551,7 +551,7 @@ void addAttribute(ONNX_NAMESPACE::NodeProto* n_p, Node* n, Symbol name) {
}
}

void encodeTypeProtoTensorType(ONNX_NAMESPACE::TypeProto_Tensor* tensor_type, Value* n) {
static void encodeTypeProtoTensorType(ONNX_NAMESPACE::TypeProto_Tensor* tensor_type, Value* n) {
if (n->elemType() != 0) {
tensor_type->set_elem_type(n->elemType());
}
Expand All @@ -570,7 +570,7 @@ void encodeTypeProtoTensorType(ONNX_NAMESPACE::TypeProto_Tensor* tensor_type, Va
}
}

void encodeValueInfo(ONNX_NAMESPACE::ValueInfoProto* v, Value* n) {
static void encodeValueInfo(ONNX_NAMESPACE::ValueInfoProto* v, Value* n) {
v->set_name(value_name(n));
if (n->elemType() != 0 || n->has_sizes()) {
ONNX_NAMESPACE::TypeProto* t = v->mutable_type();
Expand Down
29 changes: 15 additions & 14 deletions onnx/cpp2py_export.cc
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,10 @@
#include <limits>
#include <tuple>
#include <unordered_map>
#include <utility>

#include "onnx/checker.h"
#include "onnx/defs/function.h"
#include "onnx/common/ir_pb_converter.h"
#include "onnx/defs/parser.h"
#include "onnx/defs/printer.h"
#include "onnx/defs/schema.h"
Expand Down Expand Up @@ -45,26 +46,26 @@ static std::string ProtoBytesToText(const py::bytes& bytes) {
return ProtoToString(proto);
}

template <typename T, typename Ts = typename std::remove_const<T>::type>
std::pair<std::unique_ptr<Ts[]>, std::unordered_map<std::string, T*>> ParseProtoFromBytesMap(
std::unordered_map<std::string, py::bytes> bytesMap) {
template <typename T, typename Ts = std::remove_const_t<T>>
static std::pair<std::unique_ptr<Ts[]>, std::unordered_map<std::string, T*>> ParseProtoFromBytesMap(
const std::unordered_map<std::string, py::bytes>& bytesMap) {
std::unique_ptr<Ts[]> values(new Ts[bytesMap.size()]);
std::unordered_map<std::string, T*> result;
size_t i = 0;
for (auto kv : bytesMap) {
for (const auto& kv : bytesMap) {
ParseProtoFromPyBytes(&values[i], kv.second);
result[kv.first] = &values[i];
i++;
}
return std::make_pair(std::move(values), result);
}

std::unordered_map<std::string, py::bytes> CallNodeInferenceFunction(
static std::unordered_map<std::string, py::bytes> CallNodeInferenceFunction(
OpSchema* schema,
const py::bytes& nodeBytes,
std::unordered_map<std::string, py::bytes> valueTypesByNameBytes,
std::unordered_map<std::string, py::bytes> inputDataByNameBytes,
std::unordered_map<std::string, py::bytes> inputSparseDataByNameBytes,
const std::unordered_map<std::string, py::bytes>& valueTypesByNameBytes,
const std::unordered_map<std::string, py::bytes>& inputDataByNameBytes,
const std::unordered_map<std::string, py::bytes>& inputSparseDataByNameBytes,
std::unordered_map<std::string, int> opsetImports,
const int irVersion) {
NodeProto node{};
Expand Down Expand Up @@ -244,7 +245,7 @@ PYBIND11_MODULE(onnx_cpp2py_export, onnx_cpp2py_export) {
py::init([](std::string name,
std::string domain,
int since_version,
std::string doc,
const std::string& doc,
std::vector<OpSchema::FormalParameter> inputs,
std::vector<OpSchema::FormalParameter> outputs,
std::vector<std::tuple<std::string, std::vector<std::string>, std::string>> type_constraints,
Expand Down Expand Up @@ -586,7 +587,7 @@ PYBIND11_MODULE(onnx_cpp2py_export, onnx_cpp2py_export) {
version_converter.doc() = "VersionConverter submodule";
py::register_exception<ConvertError>(version_converter, "ConvertError");

version_converter.def("convert_version", [](const py::bytes& bytes, py::int_ target) {
version_converter.def("convert_version", [](const py::bytes& bytes, const py::int_& target) {
ModelProto proto{};
ParseProtoFromPyBytes(&proto, bytes);
shape_inference::InferShapes(proto);
Expand Down Expand Up @@ -659,8 +660,8 @@ PYBIND11_MODULE(onnx_cpp2py_export, onnx_cpp2py_export) {
shape_inference.def(
"infer_function_output_types",
[](const py::bytes& function_proto_bytes,
const std::vector<py::bytes> input_types_bytes,
const std::vector<py::bytes> attributes_bytes) -> std::vector<py::bytes> {
const std::vector<py::bytes>& input_types_bytes,
const std::vector<py::bytes>& attributes_bytes) -> std::vector<py::bytes> {
FunctionProto proto{};
ParseProtoFromPyBytes(&proto, function_proto_bytes);

Expand All @@ -686,7 +687,7 @@ PYBIND11_MODULE(onnx_cpp2py_export, onnx_cpp2py_export) {
for (auto& type_proto : output_types) {
std::string out;
type_proto.SerializeToString(&out);
result.push_back(py::bytes(out));
result.emplace_back(out);
}
return result;
});
Expand Down
6 changes: 2 additions & 4 deletions onnx/defs/function.cc
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,14 @@

#include "onnx/defs/function.h"

#include <map>

#include "onnx/defs/schema.h"

namespace ONNX_NAMESPACE {
std::string InteralTensorNameGenerator(const std::string& node_name, const std::string& internal_name) {
static std::string InteralTensorNameGenerator(const std::string& node_name, const std::string& internal_name) {
std::string new_name = "Func_" + node_name + internal_name;
return new_name;
}

namespace ONNX_NAMESPACE {
void FunctionExpandHelper(
const NodeProto& node,
const FunctionProto& func,
Expand Down
2 changes: 1 addition & 1 deletion onnx/defs/generator/defs.cc
Original file line number Diff line number Diff line change
Expand Up @@ -417,7 +417,7 @@ Output: [10, 8, 6]
)DOC";

template <typename T>
inline int64_t
static int64_t
compute_output_dim_for_range(const TensorProto* start, const TensorProto* limit, const TensorProto* delta) {
if (!start->dims().empty() || !limit->dims().empty() || !delta->dims().empty()) {
fail_shape_inference("Input to 'Range' op should be scalars (Tensor with only one element and shape empty)");
Expand Down
Loading

0 comments on commit 8b1209b

Please sign in to comment.