Skip to content

Commit d463d14

Browse files
JakeStevens authored and facebook-github-bot committed
Address various warnings as errors (#8581)
Summary: Pull Request resolved: #8581. Some projects use more restrictive build options than those currently used in ET CI. This means we encountered a number of errors when enabling for a microcontroller. Reviewed By: digantdesai, swolchok. Differential Revision: D69139962
1 parent 5cf0106 commit d463d14

35 files changed

+176
-135
lines changed

extension/threadpool/cpuinfo_utils.cpp

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <c10/util/irange.h>
910
#include <executorch/extension/threadpool/cpuinfo_utils.h>
1011

1112
#include <fstream>
@@ -84,7 +85,7 @@ bool populate_available_cpu_mids() {
8485
cpu_midrs->resize(num_possible_cores);
8586
const std::string kMidrFilePathPrefix = "/sys/devices/system/cpu/cpu";
8687
const std::string kMidrFilePathSuffix = "/regs/identification/midr_el1";
87-
for (int32_t i = 0; i < num_possible_cores; ++i) {
88+
for (const auto i : c10::irange(num_possible_cores)) {
8889
std::string midr_file_path =
8990
kMidrFilePathPrefix + std::to_string(i) + kMidrFilePathSuffix;
9091
ET_LOG(Info, "Reading file %s", midr_file_path.c_str());
@@ -115,7 +116,7 @@ uint32_t _get_num_performant_cores() {
115116
ET_LOG(Info, "CPU info and manual query on # of cpus dont match.");
116117
return 0;
117118
}
118-
for (int32_t i = 0; i < cpu_midrs->size(); ++i) {
119+
for (const auto i : c10::irange(cpu_midrs->size())) {
119120
uint32_t masked_midr = (*cpu_midrs)[i] & RIVISION_MASK;
120121
switch (masked_midr) {
121122
case CPUINFO_ARM_MIDR_CORTEX_A520:
@@ -148,7 +149,7 @@ uint32_t get_num_performant_cores() {
148149
uint32_t num_possible_cores = cpuinfo_get_processors_count();
149150
uint32_t num_non_performant_core = 0;
150151
if (uarch_count > 1) {
151-
for (int32_t i = 0; i < uarch_count; ++i) {
152+
for (const auto i : c10::irange(uarch_count)) {
152153
const struct cpuinfo_uarch_info* uarch_info = cpuinfo_get_uarch(i);
153154
if (is_non_performant_core(uarch_info)) {
154155
num_non_performant_core += uarch_info->processor_count;

extension/threadpool/targets.bzl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ def define_common_targets():
2323
srcs = _THREADPOOL_SRCS,
2424
deps = [
2525
"//executorch/runtime/core:core",
26+
"//executorch/runtime/core/portable_type/c10/c10:c10",
2627
],
2728
exported_headers = _THREADPOOL_HEADERS,
2829
exported_deps = [

kernels/portable/cpu/op__to_dim_order_copy.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,8 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <c10/util/irange.h>
10+
911
#include <executorch/kernels/portable/cpu/scalar_utils.h>
1012
#include <executorch/kernels/portable/cpu/util/copy_ops_util.h>
1113
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
@@ -41,7 +43,7 @@ int64_t coordinateToIndexWithDimOrder(
4143

4244
dim_order_to_stride_nocheck(
4345
sizes.data(), dim_order.data(), sizes.size(), strides);
44-
for (size_t i = 0; i < self.dim(); ++i) {
46+
for (const auto i : c10::irange(self.dim())) {
4547
index += cur_indices[i] * strides[i];
4648
}
4749
return index;
@@ -59,7 +61,7 @@ void _to_dim_order_copy_impl(const Tensor& self, Tensor& out) {
5961
for (ssize_t i = 0; i < self.numel(); i++) {
6062
// Update the current indices.
6163
for (ssize_t j = self.dim() - 1; j >= 0; j--) {
62-
if (coordinate[j] + 1 < self.size(j)) {
64+
if (coordinate[j] + 1 < static_cast<size_t>(self.size(j))) {
6365
coordinate[j]++;
6466
break;
6567
} else {

kernels/portable/cpu/op_amax.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <c10/util/irange.h>
910
#include <cmath>
1011

1112
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
@@ -44,7 +45,7 @@ Tensor& amax_out(
4445

4546
ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "amax.out", CTYPE, [&]() {
4647
CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
47-
for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
48+
for (const auto out_ix : c10::irange(out.numel())) {
4849
out_data[out_ix] = reduce_over_dim_list<CTYPE>(
4950
[](CTYPE v, CTYPE max_v) {
5051
return std::isnan(v) || v > max_v ? v : max_v;

kernels/portable/cpu/op_expand_copy.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,8 @@ Tensor& expand_copy_out(
9696

9797
ET_KERNEL_CHECK(
9898
ctx,
99-
repeat_tensor(self, {repeats, repeats_size}, out) == Error::Ok,
99+
repeat_tensor(self, makeArrayRef(repeats, repeats_size), out) ==
100+
Error::Ok,
100101
InvalidArgument,
101102
out);
102103

kernels/portable/cpu/util/broadcast_util.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <executorch/kernels/portable/cpu/util/broadcast_util.h>
910
#include <executorch/kernels/portable/cpu/util/repeat_util.h>
1011
#include <executorch/runtime/core/exec_aten/exec_aten.h>
1112
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
@@ -274,7 +275,7 @@ void delinearize_index(
274275
size_t* out_indexes,
275276
const size_t out_indexes_len) {
276277
ET_CHECK(shape.size() <= out_indexes_len);
277-
for (auto i = 0; i < shape.size(); ++i) {
278+
for (size_t i = 0; i < shape.size(); ++i) {
278279
auto dim = shape.size() - 1 - i;
279280
auto dim_size = shape[dim];
280281
out_indexes[dim] = linear_index % dim_size;
@@ -304,7 +305,8 @@ size_t linearize_access_indexes(
304305
size_t linear_index = 0;
305306
for (size_t i = 0; i < indexes_broadcast_from.size(); ++i) {
306307
// If this dimension is broadcasted, add zero to the linear address.
307-
if (indexes_broadcast_from[i] >= broadcast_from_shape[i]) {
308+
if (indexes_broadcast_from[i] >=
309+
static_cast<size_t>(broadcast_from_shape[i])) {
308310
ET_CHECK_MSG(
309311
broadcast_from_shape[i] == 1,
310312
"Expected dim size == 1 if broadcasted, but actual dim size is %zu",

kernels/portable/cpu/util/broadcast_util.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -289,7 +289,7 @@ inline void apply_binary_elementwise_fn(
289289
const CTYPE_B* const data_b = b.const_data_ptr<CTYPE_B>();
290290
CTYPE_OUT* const data_out = out.mutable_data_ptr<CTYPE_OUT>();
291291

292-
for (size_t i = 0; i < out.numel(); ++i) {
292+
for (size_t i = 0; i < static_cast<size_t>(out.numel()); ++i) {
293293
size_t a_linear_index = i;
294294
size_t b_linear_index = i;
295295

kernels/portable/cpu/util/functional_util.h

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,8 @@
88

99
#pragma once
1010

11+
#include <c10/util/irange.h>
12+
1113
#include <executorch/runtime/core/exec_aten/exec_aten.h>
1214
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
1315

@@ -30,7 +32,7 @@ inline CTYPE apply_unary_reduce_fn(
3032
const int64_t size,
3133
const int64_t stride = 1) {
3234
CTYPE acc_val = data_in[0];
33-
for (size_t i = 1; i < size; i++) {
35+
for (const auto i : c10::irange(size)) {
3436
acc_val = reduce_fun(data_in[i * stride], acc_val);
3537
}
3638
return acc_val;
@@ -51,7 +53,7 @@ inline void apply_unary_map_fn(
5153
CTYPE_OUT* const data_out,
5254
const int64_t size,
5355
const int64_t stride = 1) {
54-
for (size_t i = 0; i < size; i++) {
56+
for (const auto i : c10::irange(size)) {
5557
data_out[i * stride] = map_fun(data_in[i * stride]);
5658
}
5759
}
@@ -77,7 +79,7 @@ inline CTYPE_OUT apply_unary_map_reduce_fn(
7779
const int64_t size,
7880
const int64_t stride = 1) {
7981
CTYPE_OUT acc_val = map_fun(data_in[0]);
80-
for (size_t i = 1; i < size; ++i) {
82+
for (const auto i : c10::irange(size)) {
8183
acc_val = reduce_fun(map_fun(data_in[i * stride]), acc_val);
8284
}
8385
return acc_val;

kernels/portable/cpu/util/reduce_util.cpp

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -48,8 +48,7 @@ ET_NODISCARD bool check_dim_list_is_valid(
4848
}
4949

5050
const size_t non_neg_d = _normalize_non_neg_d(d, in.dim());
51-
ET_LOG_AND_RETURN_IF_FALSE(
52-
non_neg_d < kTensorDimensionLimit && non_neg_d >= 0);
51+
ET_LOG_AND_RETURN_IF_FALSE(non_neg_d < kTensorDimensionLimit);
5352

5453
ET_CHECK_OR_RETURN_FALSE(
5554
dim_exist[non_neg_d] == false,
@@ -86,7 +85,7 @@ size_t get_reduced_dim_product(
8685
}
8786
size_t dim_product = 1;
8887
if (!dim.has_value()) {
89-
for (size_t i = 0; i < in.dim(); ++i) {
88+
for (size_t i = 0; i < static_cast<size_t>(in.dim()); ++i) {
9089
dim_product *= in.size(i);
9190
}
9291
return dim_product;
@@ -108,7 +107,7 @@ size_t get_reduced_dim_product(
108107
size_t dim_product = 1;
109108
const size_t in_dim = in.dim();
110109
if (!dim_list.has_value() || dim_list.value().size() == 0) {
111-
for (size_t i = 0; i < in.dim(); ++i) {
110+
for (size_t i = 0; i < static_cast<size_t>(in.dim()); ++i) {
112111
dim_product *= in.size(i);
113112
}
114113
return dim_product;
@@ -136,7 +135,7 @@ size_t get_out_numel(
136135
ET_CHECK_VALID_DIM(dim_val, in.dim());
137136
}
138137
const size_t non_neg_dim = _normalize_non_neg_d(dim_val, in.dim());
139-
for (size_t d = 0; d < in.dim(); ++d) {
138+
for (size_t d = 0; d < static_cast<size_t>(in.dim()); ++d) {
140139
if (d != non_neg_dim) {
141140
out_numel *= in.size(d);
142141
}
@@ -155,7 +154,7 @@ size_t get_out_numel(
155154
dim_list) {
156155
size_t out_numel = 1;
157156
if (dim_list.has_value() && dim_list.value().size() != 0) {
158-
for (size_t d = 0; d < in.dim(); ++d) {
157+
for (size_t d = 0; d < static_cast<size_t>(in.dim()); ++d) {
159158
if (!check_dim_in_dim_list(d, in.dim(), dim_list.value())) {
160159
out_numel *= in.size(d);
161160
}
@@ -234,7 +233,7 @@ size_t compute_reduced_out_size(
234233
if (dim.has_value()) {
235234
const auto dim_val = dim.value();
236235
const size_t non_neg_dim = _normalize_non_neg_d(dim_val, in_dim);
237-
for (ssize_t i = 0; i < non_neg_dim; ++i) {
236+
for (size_t i = 0; i < non_neg_dim; ++i) {
238237
sizes_arr[i] = in.size(i);
239238
}
240239
if (keepdim) {
@@ -250,7 +249,7 @@ size_t compute_reduced_out_size(
250249
}
251250
} else {
252251
if (keepdim) {
253-
for (size_t i = 0; i < in_dim; ++i) {
252+
for (size_t i = 0; i < static_cast<size_t>(in_dim); ++i) {
254253
sizes_arr[i] = 1;
255254
}
256255
} else {
@@ -266,7 +265,9 @@ size_t compute_reduced_out_size(
266265
dim_list,
267266
bool keepdim,
268267
executorch::aten::SizesType* sizes_arr) {
269-
const auto in_dim = in.dim();
268+
// check_dim_in_dim_list and later comparisons
269+
// expect in_dim to be size_t, so cast it here
270+
const size_t in_dim = static_cast<size_t>(in.dim());
270271
size_t out_dim = in_dim;
271272

272273
if (dim_list.has_value() && dim_list.value().size() != 0) {

kernels/portable/cpu/util/reduce_util.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -50,15 +50,15 @@ void apply_on_flat_ix_with_dim_mask_and_base(
5050
const size_t start,
5151
const size_t end) {
5252
// Compute innermost dim from dim list
53-
size_t inner_dim = in.dim() - 1;
53+
int64_t inner_dim = in.dim() - 1;
5454
while (!dim_mask[inner_dim]) {
5555
inner_dim--;
5656
}
5757

5858
// Initialize array of indices per dimension. This array is used to maintain
5959
// the per-dimension index of the element in `in` that is being reduced over
6060
// Only the dims that are in the dim list are relevant.
61-
size_t dim_index[kTensorDimensionLimit];
61+
int64_t dim_index[kTensorDimensionLimit];
6262
for (int64_t d = 0; d < in.dim(); d++) {
6363
dim_index[d] = 0;
6464
}

kernels/portable/cpu/util/repeat_util.cpp

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88

99
#include <cstring>
1010

11+
#include <executorch/kernels/portable/cpu/util/repeat_util.h>
1112
#include <executorch/runtime/core/exec_aten/exec_aten.h>
1213
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
1314
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
@@ -26,7 +27,7 @@ bool check_repeat_args(
2627
Tensor& out) {
2728
// Ensure the self tensors list is non-empty.
2829
ET_CHECK_OR_RETURN_FALSE(
29-
repeats.size() >= self.dim(),
30+
static_cast<ssize_t>(repeats.size()) >= self.dim(),
3031
"Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor");
3132

3233
// Repeat arrayref shall not contain negative element.
@@ -39,7 +40,7 @@ bool check_repeat_args(
3940

4041
/// Check if out.size() is legal.
4142
ET_CHECK_OR_RETURN_FALSE(
42-
out.dim() == repeats.size(),
43+
static_cast<size_t>(out.dim()) == repeats.size(),
4344
"The dimension of out shall equal size of repeats, but now is %zd and %zd",
4445
out.dim(),
4546
repeats.size());
@@ -48,7 +49,7 @@ bool check_repeat_args(
4849
// kTensorDimensionLimit. Only check out tensor because the number of
4950
// dimension of out tensor shall have more than or equal to self tensor
5051
ET_CHECK_OR_RETURN_FALSE(
51-
out.dim() <= kTensorDimensionLimit,
52+
static_cast<size_t>(out.dim()) <= kTensorDimensionLimit,
5253
"The dimension of input and output should not be larger than %zd",
5354
kTensorDimensionLimit);
5455

@@ -58,7 +59,7 @@ bool check_repeat_args(
5859
// repeats, and called it reformat_self_size. We then make point-to-point mul
5960
// of reformat_self_size and repeats. The result should equal out.size().
6061
size_t reformat_self_size[kTensorDimensionLimit];
61-
for (size_t i = 0; i < out.dim() - self.dim(); i++) {
62+
for (ssize_t i = 0; i < out.dim() - self.dim(); i++) {
6263
reformat_self_size[i] = 1;
6364
}
6465

@@ -130,7 +131,7 @@ void repeat_internal(
130131
// The increment along index of slot array to reach the next possible valid
131132
// value.
132133
int64_t incr[kTensorDimensionLimit];
133-
for (size_t i = 0; i < self_dim; i++) {
134+
for (size_t i = 0; i < static_cast<size_t>(self_dim); i++) {
134135
incr[i] = self_size[i];
135136
}
136137

@@ -140,7 +141,7 @@ void repeat_internal(
140141
// than self).
141142
size_t index = self_dim - 1;
142143
size_t start = out.dim() - self_dim;
143-
while (slots[0] != out.size(start)) {
144+
while (slots[0] != static_cast<size_t>(out.size(start))) {
144145
// Compute the offset (from origin) in the out tensor where this self
145146
// data will be copied to.
146147
size_t offset = compute_access_offset(slots, strides, self_dim);
@@ -150,7 +151,7 @@ void repeat_internal(
150151
slots[index] += incr[index];
151152
// If we have reached the limit in the innermost dimension, successively
152153
// increment the slot index of outer dimensions.
153-
while (slots[index] == out.size(start + index)) {
154+
while (slots[index] == static_cast<size_t>(out.size(start + index))) {
154155
if (index == 0) {
155156
break;
156157
}
@@ -226,7 +227,7 @@ Error repeat_tensor(
226227
// so we reset the upper bound of innermost dim to 1. 'in_incr' indicates
227228
// the size (in bytes) of the self data.
228229
int64_t limits[kTensorDimensionLimit];
229-
for (size_t i = 0; i < self_dim; i++) {
230+
for (ssize_t i = 0; i < self_dim; i++) {
230231
limits[i] = self_size[i];
231232
}
232233

kernels/portable/cpu/util/targets.bzl

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,6 @@ def define_common_targets():
6161
"//executorch/runtime/core/exec_aten/util:scalar_type_util",
6262
"//executorch/runtime/core/exec_aten/util:tensor_util",
6363
],
64-
compiler_flags = ["-Wno-missing-prototypes"],
6564
visibility = ["//executorch/kernels/portable/cpu/..."],
6665
)
6766

@@ -71,7 +70,6 @@ def define_common_targets():
7170
exported_headers = [
7271
"broadcast_util.h",
7372
],
74-
compiler_flags = ["-Wno-missing-prototypes"],
7573
deps = [
7674
":repeat_util",
7775
"//executorch/runtime/kernel:kernel_includes",

kernels/prim_ops/et_view.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,8 @@ bool get_view_target_size(
3232
executorch::aten::ArrayRef<int64_t> size,
3333
int64_t dim,
3434
executorch::aten::SizesType* out_size) {
35-
ET_LOG_AND_RETURN_IF_FALSE(size.size() == dim);
35+
ET_LOG_AND_RETURN_IF_FALSE(
36+
dim >= 0 && size.size() == static_cast<size_t>(dim));
3637
int minus1_dim = -1;
3738
int n_zero = 0;
3839
int64_t numel_without_minus_1 = 1;

runtime/core/data_loader.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -69,12 +69,12 @@ class DataLoader {
6969
SegmentInfo() = default;
7070

7171
explicit SegmentInfo(
72-
Type segment_type,
73-
size_t segment_index = 0,
74-
const char* descriptor = nullptr)
75-
: segment_type(segment_type),
76-
segment_index(segment_index),
77-
descriptor(descriptor) {}
72+
Type segment_type_,
73+
size_t segment_index_ = 0,
74+
const char* descriptor_ = nullptr)
75+
: segment_type(segment_type_),
76+
segment_index(segment_index_),
77+
descriptor(descriptor_) {}
7878
};
7979

8080
virtual ~DataLoader() = default;

0 commit comments

Comments (0)