# Run clang-format in CI (#15524)
### Description

Run clang-format in CI. Formatted all C/C++ and Objective-C/C++ files.

Excluded:

```
    'onnxruntime/core/mlas/**',
    'onnxruntime/contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/**',
```

because they contain assembly or are data-heavy.


### Motivation and Context

Coding style consistency
justinchuby authored Apr 18, 2023
1 parent 2700d01 commit cf19c36
Showing 1,023 changed files with 11,304 additions and 11,120 deletions.
1 change: 0 additions & 1 deletion .clang-format
@@ -19,4 +19,3 @@ DerivePointerAlignment: false
# NamespaceIndentation: All

...

45 changes: 44 additions & 1 deletion .lintrunner.toml
@@ -14,7 +14,7 @@
# To lint local changes:
#
# ```bash
# lintrunner -m main
# lintrunner
# ```
#
# To lint all files:
@@ -33,6 +33,8 @@
# To update an existing linting rule or create a new one, modify this file or create a
# new adapter following examples in https://github.com/justinchuby/lintrunner-adapters.

merge_base_with = 'origin/main'

[[linter]]
code = 'RUFF'
include_patterns = [
@@ -168,3 +170,44 @@ command = [
'@{{PATHSFILE}}'
]
is_formatter = true

[[linter]]
code = 'CLANGFORMAT'
include_patterns = [
'**/*.h',
'**/*.cc',
'**/*.hpp',
'**/*.cpp',
'**/*.m',
'**/*.mm',
]
exclude_patterns = [
'java/**', # FIXME: Enable clang-format for java
'js/**',
'onnxruntime/contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/**', # Contains data chunks
'onnxruntime/core/flatbuffers/schema/ort.fbs.h', # Generated code
'onnxruntime/core/graph/contrib_ops/quantization_defs.cc',
'onnxruntime/core/mlas/**', # Contains assembly code
'winml/**', # FIXME: Enable clang-format for winml
]
command = [
'python',
'-m',
'lintrunner_adapters',
'run',
'clangformat_linter',
'--binary=clang-format',
'--fallback',
'--',
'@{{PATHSFILE}}'
]
init_command = [
'python',
'-m',
'lintrunner_adapters',
'run',
'pip_init',
'--dry-run={{DRYRUN}}',
'clang-format==16.0.1',
]
is_formatter = true
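
A note on what this linter does in practice: the before/after sketch below is illustrative only (not a file from this commit), assuming the repository's Google-based `.clang-format` with `DerivePointerAlignment: false`, so pointers bind to the type and comment spacing is normalized.

```cpp
#include <cstdio>

// Before formatting, this might have read:
//   static int *scale(const int *v,int f){return new int(*v*f);}  //scale
static int* scale(const int* v, int f) { return new int(*v * f); }  // scale v by f

int main() {
  int three = 3;
  int* result = scale(&three, 2);
  std::printf("%d\n", *result);  // prints 6
  delete result;
  return 0;
}
```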
4 changes: 2 additions & 2 deletions cmake/uwp_stubs.h
@@ -6,6 +6,6 @@
// Extending the std namespace is undefined behavior
// NOLINTNEXTLINE
namespace std {
inline char *getenv(const char*) { return nullptr; }
}
inline char* getenv(const char*) { return nullptr; }
} // namespace std
#endif
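
For context, a sketch of why the stub matters: UWP builds have no usable process environment, so the stub above lets existing callers of `std::getenv` compile and behave as if every variable were unset (the variable name below is hypothetical).

```cpp
#include <cstdio>
#include <cstdlib>

int main() {
  // Under the UWP stub, std::getenv always returns nullptr, so the fallback
  // branch runs; on desktop platforms the real lookup happens instead.
  const char* level = std::getenv("EXAMPLE_LOG_LEVEL");  // hypothetical name
  std::printf("log level: %s\n", level ? level : "(default)");
  return 0;
}
```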
82 changes: 42 additions & 40 deletions csharp/test/Microsoft.AI.MachineLearning.Tests/main.cpp
@@ -1,11 +1,11 @@
#include <stdio.h>
#include "winrt/microsoft.ai.machinelearning.h"
#include "winrt/windows.storage.h"
#include "winrt/windows.foundation.h"
#include "winrt/windows.foundation.collections.h"
#include "winrt/Windows.Graphics.h"
#include "winrt/Windows.Graphics.Imaging.h"
#include "winrt/Windows.Graphics.h"
#include "winrt/Windows.Media.h"
#include "winrt/microsoft.ai.machinelearning.h"
#include "winrt/windows.foundation.collections.h"
#include "winrt/windows.foundation.h"
#include "winrt/windows.storage.h"
#include <stdio.h>
#include <windows.h>

EXTERN_C IMAGE_DOS_HEADER __ImageBase;
@@ -15,42 +15,44 @@ using namespace winrt::Windows::Storage;
using namespace winrt::Windows::Media;
using namespace winrt::Windows::Graphics::Imaging;

std::wstring GetModulePath() {
std::wstring val;
wchar_t modulePath[MAX_PATH] = {0};
GetModuleFileNameW((HINSTANCE)&__ImageBase, modulePath, _countof(modulePath));
wchar_t drive[_MAX_DRIVE];
wchar_t dir[_MAX_DIR];
wchar_t filename[_MAX_FNAME];
wchar_t ext[_MAX_EXT];
_wsplitpath_s(modulePath, drive, _MAX_DRIVE, dir, _MAX_DIR, filename, _MAX_FNAME, ext, _MAX_EXT);
std::wstring GetModulePath()
{
std::wstring val;
wchar_t modulePath[MAX_PATH] = {0};
GetModuleFileNameW((HINSTANCE)&__ImageBase, modulePath, _countof(modulePath));
wchar_t drive[_MAX_DRIVE];
wchar_t dir[_MAX_DIR];
wchar_t filename[_MAX_FNAME];
wchar_t ext[_MAX_EXT];
_wsplitpath_s(modulePath, drive, _MAX_DRIVE, dir, _MAX_DIR, filename, _MAX_FNAME, ext, _MAX_EXT);

val = drive;
val += dir;
val = drive;
val += dir;

return val;
return val;
}

int main() {
printf("Load squeezenet.onnx.\n");
auto model = LearningModel::LoadFromFilePath(L"squeezenet.onnx");
printf("Load kitten_224.png as StorageFile.\n");
auto name = GetModulePath() + L"kitten_224.png";
auto image = StorageFile::GetFileFromPathAsync(name).get();
printf("Load StorageFile into Stream.\n");
auto stream = image.OpenAsync(FileAccessMode::Read).get();
printf("Create SoftwareBitmap from decoded Stream.\n");
auto softwareBitmap = BitmapDecoder::CreateAsync(stream).get().GetSoftwareBitmapAsync().get();
printf("Create VideoFrame.\n");
auto frame = VideoFrame::CreateWithSoftwareBitmap(softwareBitmap);
printf("Create LearningModelSession.\n");
auto session = LearningModelSession(model);
printf("Create LearningModelBinding.\n");
auto binding = LearningModelBinding(session);
printf("Bind data_0.\n");
binding.Bind(L"data_0", frame);
printf("Evaluate.\n");
auto results = session.Evaluate(binding, L"");
printf("Success!\n");
return 0;
int main()
{
printf("Load squeezenet.onnx.\n");
auto model = LearningModel::LoadFromFilePath(L"squeezenet.onnx");
printf("Load kitten_224.png as StorageFile.\n");
auto name = GetModulePath() + L"kitten_224.png";
auto image = StorageFile::GetFileFromPathAsync(name).get();
printf("Load StorageFile into Stream.\n");
auto stream = image.OpenAsync(FileAccessMode::Read).get();
printf("Create SoftwareBitmap from decoded Stream.\n");
auto softwareBitmap = BitmapDecoder::CreateAsync(stream).get().GetSoftwareBitmapAsync().get();
printf("Create VideoFrame.\n");
auto frame = VideoFrame::CreateWithSoftwareBitmap(softwareBitmap);
printf("Create LearningModelSession.\n");
auto session = LearningModelSession(model);
printf("Create LearningModelBinding.\n");
auto binding = LearningModelBinding(session);
printf("Bind data_0.\n");
binding.Bind(L"data_0", frame);
printf("Evaluate.\n");
auto results = session.Evaluate(binding, L"");
printf("Success!\n");
return 0;
}
4 changes: 2 additions & 2 deletions include/onnxruntime/core/common/code_location.h
@@ -47,10 +47,10 @@ struct CodeLocation {
out << (format == Format::kFilename ? FileNoPath() : file_and_path) << ":" << line_num << " " << function;
return out.str();
}
//utf-8. Because on Windows we compile our code with "/utf-8". And we assume the other platforms only use utf-8.
// utf-8. Because on Windows we compile our code with "/utf-8". And we assume the other platforms only use utf-8.
const std::string file_and_path;
const int line_num;
//utf-8
// utf-8
const std::string function;
const std::vector<std::string> stacktrace;
};
1 change: 0 additions & 1 deletion include/onnxruntime/core/common/common.h
@@ -37,7 +37,6 @@
#include "core/common/make_string.h"
#include "core/common/status.h"


namespace onnxruntime {

using TimePoint = std::chrono::high_resolution_clock::time_point;
6 changes: 3 additions & 3 deletions include/onnxruntime/core/common/inlined_containers.h
@@ -158,9 +158,9 @@ class NodeHashSet : public std::unordered_set<T,

template <typename Key, typename Value, typename Allocator>
class NodeHashMap : public std::unordered_map<Key, Value,
std::hash<Key>,
std::equal_to<Key>,
Allocator> {
std::hash<Key>,
std::equal_to<Key>,
Allocator> {
using Base = std::unordered_map<Key, Value,
std::hash<Key>,
std::equal_to<Key>,
10 changes: 6 additions & 4 deletions include/onnxruntime/core/common/logging/macros.h
@@ -255,15 +255,17 @@
#else
// Disabled in Release builds.
#define VLOGS(logger, level) \
if constexpr (true) {} else LOGS_CATEGORY(logger, VERBOSE, "VLOG" #level)
if constexpr (true) { \
} else \
LOGS_CATEGORY(logger, VERBOSE, "VLOG" #level)
#define VLOGS_USER(logger, level) \
if constexpr (true) {} else LOGS_USER_CATEGORY(logger, VERBOSE, "VLOG" #level)
if constexpr (true) { \
} else \
LOGS_USER_CATEGORY(logger, VERBOSE, "VLOG" #level)
#define VLOGF(logger, level, format_str, ...)
#define VLOGF_USER(logger, level, format_str, ...)
#endif



// Default logger variants
#define VLOGS_DEFAULT(level) \
VLOGS(::onnxruntime::logging::LoggingManager::DefaultLogger(), level)
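
The reformatted release-build macros keep a standard trick for disabling stream-style logging: expanding to `if constexpr (true) { } else ...` leaves the caller's trailing `<<` chain parsed and type-checked, but unreachable. A self-contained sketch of the pattern, with a toy macro standing in for the real `LOGS_CATEGORY` expansion:

```cpp
#include <iostream>

// Toy stand-in for the disabled VLOGS macro: the else-branch swallows the
// caller's "<< ..." chain, and control flow can never reach it.
#define DISABLED_VLOG() \
  if constexpr (true) { \
  } else                \
    std::cerr

int main() {
  // Parsed and type-checked, but never evaluated at runtime.
  DISABLED_VLOG() << "verbose detail skipped in release builds\n";
  std::cout << "done\n";
  return 0;
}
```

Because the macro's `if` always carries its own `else`, it also composes safely inside unbraced `if`/`else` statements at the call site.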
2 changes: 1 addition & 1 deletion include/onnxruntime/core/common/profiler_common.h
@@ -75,7 +75,7 @@ struct EventRecord {

using Events = std::vector<EventRecord>;

//Execution Provider Profiler
// Execution Provider Profiler
class EpProfiler {
public:
virtual ~EpProfiler() = default;
4 changes: 2 additions & 2 deletions include/onnxruntime/core/common/status.h
@@ -121,10 +121,10 @@ class [[nodiscard]] Status {

Status(StatusCategory category, int code);

GSL_SUPPRESS(r.11)
GSL_SUPPRESS(r .11)
Status(const Status& other)
: state_((other.state_ == nullptr) ? nullptr : new State(*other.state_)) {}
GSL_SUPPRESS(r.11)
GSL_SUPPRESS(r .11)
Status& operator=(const Status& other) {
if (state_ != other.state_) {
if (other.state_ == nullptr) {
9 changes: 4 additions & 5 deletions include/onnxruntime/core/eager/ort_kernel_invoker.h
@@ -22,12 +22,11 @@ namespace onnxruntime {

class ORTInvoker {
public:
ORTInvoker(std::shared_ptr<IExecutionProvider> execution_provider,
ORTInvoker(std::shared_ptr<IExecutionProvider> execution_provider,
const logging::Logger& logger,
const IOnnxRuntimeOpSchemaRegistryList& custom_op_registries) :
execution_provider_(std::move(execution_provider)), logger_(logger), custom_op_registries_(custom_op_registries) {
const IOnnxRuntimeOpSchemaRegistryList& custom_op_registries) : execution_provider_(std::move(execution_provider)), logger_(logger), custom_op_registries_(custom_op_registries) {
if (!execution_provider_) {
ORT_THROW("Execution provider is nullptr");
ORT_THROW("Execution provider is nullptr");
}
}

@@ -36,7 +35,7 @@ class ORTInvoker {
}

common::Status Invoke(const std::string& op_name,
//optional inputs / outputs?
// optional inputs / outputs?
const std::vector<OrtValue>& inputs,
std::vector<OrtValue>& outputs,
const NodeAttributes* attributes,
6 changes: 3 additions & 3 deletions include/onnxruntime/core/framework/data_types.h
@@ -386,8 +386,8 @@ void AssignOpaqueDomainName(const char* domain, const char* name,

} // namespace data_types_internal

//The suppressed warning is: "The type with a virtual function needs either public virtual or protected nonvirtual destructor."
//However, we do not allocate this type on heap.
// The suppressed warning is: "The type with a virtual function needs either public virtual or protected nonvirtual destructor."
// However, we do not allocate this type on heap.
#if defined(_MSC_VER) && !defined(__clang__)
#pragma warning(push)
#pragma warning(disable : 26436)
@@ -614,7 +614,7 @@ class OptionalType :
#if !defined(DISABLE_OPTIONAL_TYPE)
OptionalType()
#else
OptionalType() : DisabledTypeBase { DataTypeImpl::GeneralType::kOptional, 0 }
OptionalType() : DisabledTypeBase{DataTypeImpl::GeneralType::kOptional, 0}
#endif
{
using namespace data_types_internal;
33 changes: 16 additions & 17 deletions include/onnxruntime/core/framework/data_types_internal.h
@@ -29,17 +29,17 @@
namespace onnxruntime {
namespace utils {

// The following primitives are strongly recommended for switching on tensor input datatypes for
// kernel implementations.
//
// 1) If you need to handle all of the primitive tensor contained datatypes, the best choice would be macros
// DispatchOnTensorType or DispatchOnTensorTypeWithReturn. Use inline wrappers so your function can be invoked as function<T>().
// 2) if you have a few types, use Tensor.IsDataType<T>()/IsDataTypeString() or use utils::IsPrimitiveDataType<T>()
// if you have a standalone MLDatatType with a sequence of if/else statements.
// 3) For something in between, we suggest to use CallDispatcher pattern.
//
// Invoking DataTypeImpl::GetType<T>() for switching on input types is discouraged and should be avoided.
// Every primitive type carries with it an integer constant that can be used for quick switching on types.
// The following primitives are strongly recommended for switching on tensor input datatypes for
// kernel implementations.
//
// 1) If you need to handle all of the primitive tensor contained datatypes, the best choice would be macros
// DispatchOnTensorType or DispatchOnTensorTypeWithReturn. Use inline wrappers so your function can be invoked as function<T>().
// 2) if you have a few types, use Tensor.IsDataType<T>()/IsDataTypeString() or use utils::IsPrimitiveDataType<T>()
// if you have a standalone MLDatatType with a sequence of if/else statements.
// 3) For something in between, we suggest to use CallDispatcher pattern.
//
// Invoking DataTypeImpl::GetType<T>() for switching on input types is discouraged and should be avoided.
// Every primitive type carries with it an integer constant that can be used for quick switching on types.

#define DispatchOnTensorType(tensor_type, function, ...) \
switch (tensor_type->AsPrimitiveDataType()->GetDataType()) { \
@@ -498,11 +498,10 @@ class ContainerChecker {
ORT_ENFORCE(++index < c.size(), "Sequence is missing type entry for its element");
constexpr int32_t prim_type = ToTensorProtoElementType<T>();
// Check if this is a primitive type and it matches
if constexpr(prim_type != ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED) {
if constexpr (prim_type != ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED) {
return c[index].IsType(data_types_internal::ContainerType::kTensor) &&
c[index].IsPrimType(prim_type);
}
else {
} else {
// T is not primitive, check next entry for non-primitive proto
return IsContainerOfType<T>::check(c, index);
}
Expand All @@ -528,11 +527,11 @@ class ContainerChecker {
}
ORT_ENFORCE(++index < c.size(), "Map is missing type entry for its value");
constexpr int32_t val_type = ToTensorProtoElementType<V>();
if constexpr(val_type != ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED) {
if constexpr (val_type != ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED) {
return c[index].IsType(data_types_internal::ContainerType::kTensor) &&
c[index].IsPrimType(val_type);
}
else return IsContainerOfType<V>::check(c, index);
} else
return IsContainerOfType<V>::check(c, index);
}
};

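
Context for the `if constexpr` edits above: the checker pattern dispatches at compile time, short-circuiting when the type maps to a primitive tensor-proto element and recursing into container handling otherwise. A reduced sketch with toy type codes (not the real `ToTensorProtoElementType`):

```cpp
#include <cstdint>
#include <cstdio>
#include <type_traits>

// Toy type-to-element-code mapping; 0 plays the role of
// TensorProto_DataType_UNDEFINED for non-primitive types.
template <typename T>
constexpr int32_t ToElementType() {
  if constexpr (std::is_same_v<T, float>) {
    return 1;
  } else if constexpr (std::is_same_v<T, int32_t>) {
    return 6;
  } else {
    return 0;
  }
}

template <typename T>
bool CheckElement() {
  constexpr int32_t code = ToElementType<T>();
  if constexpr (code != 0) {
    return true;   // primitive: resolved entirely at compile time
  } else {
    return false;  // stand-in for the recursive IsContainerOfType<T>::check
  }
}

int main() {
  std::printf("float: %d, void*: %d\n", CheckElement<float>(), CheckElement<void*>());
  return 0;
}
```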
11 changes: 4 additions & 7 deletions include/onnxruntime/core/framework/float16.h
@@ -69,10 +69,9 @@ struct BFloat16 {
val = static_cast<uint16_t>((U32 + rounding_bias) >> 16);
}
#else
if constexpr(endian::native == endian::little) {
if constexpr (endian::native == endian::little) {
std::memcpy(&val, reinterpret_cast<char*>(&v) + sizeof(uint16_t), sizeof(uint16_t));
}
else {
} else {
std::memcpy(&val, &v, sizeof(uint16_t));
}
#endif
Expand All @@ -93,11 +92,10 @@ struct BFloat16 {
float result;
char* const first = reinterpret_cast<char*>(&result);
char* const second = first + sizeof(uint16_t);
if constexpr(endian::native == endian::little) {
if constexpr (endian::native == endian::little) {
std::memset(first, 0, sizeof(uint16_t));
std::memcpy(second, &val, sizeof(uint16_t));
}
else {
} else {
std::memcpy(first, &val, sizeof(uint16_t));
std::memset(second, 0, sizeof(uint16_t));
}
@@ -117,7 +115,6 @@ inline ORT_HOST_DEVICE bool operator==(const BFloat16& left, const BFloat16& rig
inline ORT_HOST_DEVICE bool operator!=(const BFloat16& left, const BFloat16& right) { return left.val != right.val; }
inline ORT_HOST_DEVICE bool operator<(const BFloat16& left, const BFloat16& right) { return left.val < right.val; }


// User defined suffixes to make it easier to declare
// initializers with MLFloat16 and BFloat16 from unsigned short
// E.g 10_f16 or 10_b16
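
For readers of the BFloat16 hunks above: a bfloat16 is the high 16 bits of an IEEE-754 float, which is why the little-endian path memcpys from the second half of the float's bytes. A standalone round-trip sketch of the truncating path (little-endian host assumed, matching the `#else` branch):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// bfloat16 by truncation: keep the float's top 16 bits (sign, 8 exponent
// bits, and the 7 highest mantissa bits).
static uint16_t FloatToBF16(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return static_cast<uint16_t>(bits >> 16);
}

static float BF16ToFloat(uint16_t v) {
  uint32_t bits = static_cast<uint32_t>(v) << 16;  // low mantissa bits zeroed
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  float x = 3.14159f;
  std::printf("%f -> %f\n", x, BF16ToFloat(FloatToBF16(x)));  // ~3.140625
  return 0;
}
```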
2 changes: 1 addition & 1 deletion include/onnxruntime/core/framework/func_api.h
@@ -10,7 +10,7 @@ using DestroyFunc = void (*)(void*, void*);
using AllocatorHandle = void*;

typedef struct {
//right now we only include allocation for host memory
// right now we only include allocation for host memory
AllocateFunc allocate_func;
DestroyFunc release_func;
AllocatorHandle allocator_handle;