
Commit e1bd3ea

JakeStevens authored and facebook-github-bot committed
Address various warnings as errors
Summary: Some projects use more restrictive build options than those currently used in ET CI. As a result, we encountered a number of errors when enabling the build for a microcontroller.

Reviewed By: digantdesai, swolchok

Differential Revision: D69139962
1 parent b6ffe1a commit e1bd3ea

25 files changed (+121, −108 lines)
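Most of the hunks below address sign-compare errors: Tensor::dim() and Tensor::numel() return signed integers in at least some build configurations, so comparing them against a size_t loop index or rank fails once -Wsign-compare is treated as an error. A minimal sketch of the pattern and the static_cast fix used throughout the commit (the tensor type here is a stand-in, not the ExecuTorch API):

    #include <cstddef>

    // Stand-in for a tensor whose dim() returns a signed type, as in ExecuTorch.
    struct FakeTensor {
      std::ptrdiff_t dim() const { return 4; }
    };

    std::size_t count_dims(const FakeTensor& t) {
      std::size_t n = 0;
      // `i < t.dim()` mixes size_t with a signed type and trips
      // -Werror=sign-compare; casting the value (known non-negative here)
      // to size_t is the fix applied in the hunks below.
      for (std::size_t i = 0; i < static_cast<std::size_t>(t.dim()); ++i) {
        ++n;
      }
      return n;
    }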

kernels/portable/cpu/op_expand_copy.cpp

Lines changed: 1 addition & 1 deletion
@@ -96,7 +96,7 @@ Tensor& expand_copy_out(
 
   ET_KERNEL_CHECK(
       ctx,
-      repeat_tensor(self, {repeats, repeats_size}, out) == Error::Ok,
+      repeat_tensor(self, makeArrayRef(repeats, repeats_size), out) == Error::Ok,
       InvalidArgument,
       out);
 

kernels/portable/cpu/util/broadcast_util.cpp

Lines changed: 3 additions & 2 deletions
@@ -6,6 +6,7 @@
  * LICENSE file in the root directory of this source tree.
  */
 
+#include <executorch/kernels/portable/cpu/util/broadcast_util.h>
 #include <executorch/kernels/portable/cpu/util/repeat_util.h>
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
 #include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
@@ -274,7 +275,7 @@ void delinearize_index(
     size_t* out_indexes,
     const size_t out_indexes_len) {
   ET_CHECK(shape.size() <= out_indexes_len);
-  for (auto i = 0; i < shape.size(); ++i) {
+  for (size_t i = 0; i < shape.size(); ++i) {
     auto dim = shape.size() - 1 - i;
     auto dim_size = shape[dim];
     out_indexes[dim] = linear_index % dim_size;
@@ -304,7 +305,7 @@ size_t linearize_access_indexes(
   size_t linear_index = 0;
   for (size_t i = 0; i < indexes_broadcast_from.size(); ++i) {
     // If this dimension is broadcasted, add zero to the linear address.
-    if (indexes_broadcast_from[i] >= broadcast_from_shape[i]) {
+    if (indexes_broadcast_from[i] >= static_cast<size_t>(broadcast_from_shape[i])) {
       ET_CHECK_MSG(
           broadcast_from_shape[i] == 1,
           "Expected dim size == 1 if broadcasted, but actual dim size is %zu",

kernels/portable/cpu/util/broadcast_util.h

Lines changed: 1 addition & 1 deletion
@@ -289,7 +289,7 @@ inline void apply_binary_elementwise_fn(
   const CTYPE_B* const data_b = b.const_data_ptr<CTYPE_B>();
   CTYPE_OUT* const data_out = out.mutable_data_ptr<CTYPE_OUT>();
 
-  for (size_t i = 0; i < out.numel(); ++i) {
+  for (size_t i = 0; i < static_cast<size_t>(out.numel()); ++i) {
     size_t a_linear_index = i;
     size_t b_linear_index = i;
 

kernels/portable/cpu/util/reduce_util.cpp

Lines changed: 10 additions & 8 deletions
@@ -49,7 +49,7 @@ ET_NODISCARD bool check_dim_list_is_valid(
 
     const size_t non_neg_d = _normalize_non_neg_d(d, in.dim());
     ET_LOG_AND_RETURN_IF_FALSE(
-        non_neg_d < kTensorDimensionLimit && non_neg_d >= 0);
+        non_neg_d < kTensorDimensionLimit);
 
     ET_CHECK_OR_RETURN_FALSE(
         dim_exist[non_neg_d] == false,
@@ -86,7 +86,7 @@ size_t get_reduced_dim_product(
   }
   size_t dim_product = 1;
   if (!dim.has_value()) {
-    for (size_t i = 0; i < in.dim(); ++i) {
+    for (size_t i = 0; i < static_cast<size_t>(in.dim()); ++i) {
       dim_product *= in.size(i);
     }
     return dim_product;
@@ -108,7 +108,7 @@ size_t get_reduced_dim_product(
   size_t dim_product = 1;
   const size_t in_dim = in.dim();
   if (!dim_list.has_value() || dim_list.value().size() == 0) {
-    for (size_t i = 0; i < in.dim(); ++i) {
+    for (size_t i = 0; i < static_cast<size_t>(in.dim()); ++i) {
       dim_product *= in.size(i);
     }
     return dim_product;
@@ -136,7 +136,7 @@ size_t get_out_numel(
     ET_CHECK_VALID_DIM(dim_val, in.dim());
   }
   const size_t non_neg_dim = _normalize_non_neg_d(dim_val, in.dim());
-  for (size_t d = 0; d < in.dim(); ++d) {
+  for (size_t d = 0; d < static_cast<size_t>(in.dim()); ++d) {
     if (d != non_neg_dim) {
       out_numel *= in.size(d);
     }
@@ -155,7 +155,7 @@ size_t get_out_numel(
         dim_list) {
   size_t out_numel = 1;
   if (dim_list.has_value() && dim_list.value().size() != 0) {
-    for (size_t d = 0; d < in.dim(); ++d) {
+    for (size_t d = 0; d < static_cast<size_t>(in.dim()); ++d) {
       if (!check_dim_in_dim_list(d, in.dim(), dim_list.value())) {
         out_numel *= in.size(d);
       }
@@ -234,7 +234,7 @@ size_t compute_reduced_out_size(
   if (dim.has_value()) {
     const auto dim_val = dim.value();
     const size_t non_neg_dim = _normalize_non_neg_d(dim_val, in_dim);
-    for (ssize_t i = 0; i < non_neg_dim; ++i) {
+    for (size_t i = 0; i < non_neg_dim; ++i) {
       sizes_arr[i] = in.size(i);
     }
     if (keepdim) {
@@ -250,7 +250,7 @@ size_t compute_reduced_out_size(
     }
   } else {
     if (keepdim) {
-      for (size_t i = 0; i < in_dim; ++i) {
+      for (size_t i = 0; i < static_cast<size_t>(in_dim); ++i) {
         sizes_arr[i] = 1;
       }
     } else {
@@ -266,7 +266,9 @@ size_t compute_reduced_out_size(
         dim_list,
     bool keepdim,
     executorch::aten::SizesType* sizes_arr) {
-  const auto in_dim = in.dim();
+  // check_dim_in_dim_list and later comparisons
+  // expect in_dim to be size_t, so cast it here
+  const size_t in_dim = static_cast<size_t>(in.dim());
   size_t out_dim = in_dim;
 
   if (dim_list.has_value() && dim_list.value().size() != 0) {
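The last hunk above casts in.dim() once into a size_t local and reuses it, instead of repeating static_cast at every comparison. A small illustrative sketch of that style (names are hypothetical, not from the ExecuTorch sources):

    #include <cstddef>

    // Hoist the signed-to-unsigned conversion into one named local so the
    // loop bound and any helpers that expect size_t can use it directly.
    std::size_t count_kept_dims(std::ptrdiff_t signed_dim, const bool* reduced) {
      const std::size_t in_dim = static_cast<std::size_t>(signed_dim);
      std::size_t kept = 0;
      for (std::size_t d = 0; d < in_dim; ++d) {
        if (!reduced[d]) {
          ++kept;
        }
      }
      return kept;
    }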

kernels/portable/cpu/util/repeat_util.cpp

Lines changed: 9 additions & 8 deletions
@@ -9,6 +9,7 @@
 #include <cstring>
 
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
+#include <executorch/kernels/portable/cpu/util/repeat_util.h>
 #include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
 #include <executorch/runtime/core/exec_aten/util/tensor_util.h>
 #include <executorch/runtime/platform/assert.h>
@@ -26,7 +27,7 @@ bool check_repeat_args(
     Tensor& out) {
   // Ensure the self tensors list is non-empty.
   ET_CHECK_OR_RETURN_FALSE(
-      repeats.size() >= self.dim(),
+      static_cast<ssize_t>(repeats.size()) >= self.dim(),
      "Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor");
 
   // Repeat arrayref shall not contain negative element.
@@ -39,7 +40,7 @@ bool check_repeat_args(
 
   /// Check if out.size() is legal.
   ET_CHECK_OR_RETURN_FALSE(
-      out.dim() == repeats.size(),
+      static_cast<size_t>(out.dim()) == repeats.size(),
      "The dimension of out shall equal size of repeats, but now is %zd and %zd",
      out.dim(),
      repeats.size());
@@ -48,7 +49,7 @@ bool check_repeat_args(
   // kTensorDimensionLimit. Only check out tensor because the number of
   // dimension of out tensor shall have more than or equal to self tensor
   ET_CHECK_OR_RETURN_FALSE(
-      out.dim() <= kTensorDimensionLimit,
+      static_cast<size_t>(out.dim()) <= kTensorDimensionLimit,
      "The dimension of input and output should not be larger than %zd",
      kTensorDimensionLimit);
 
@@ -58,7 +59,7 @@ bool check_repeat_args(
   // repeats, and called it reformat_self_size. We then make point-to-point mul
   // of reformat_self_size and repeats. The result should equal out.size().
   size_t reformat_self_size[kTensorDimensionLimit];
-  for (size_t i = 0; i < out.dim() - self.dim(); i++) {
+  for (ssize_t i = 0; i < out.dim() - self.dim(); i++) {
     reformat_self_size[i] = 1;
   }
 
@@ -130,7 +131,7 @@ void repeat_internal(
   // The increment along index of slot array to reach the next possible valid
   // value.
   int64_t incr[kTensorDimensionLimit];
-  for (size_t i = 0; i < self_dim; i++) {
+  for (size_t i = 0; i < static_cast<size_t>(self_dim); i++) {
     incr[i] = self_size[i];
   }
 
@@ -140,7 +141,7 @@ void repeat_internal(
   // than self).
   size_t index = self_dim - 1;
   size_t start = out.dim() - self_dim;
-  while (slots[0] != out.size(start)) {
+  while (slots[0] != static_cast<size_t>(out.size(start))) {
     // Compute the offset (from origin) in the out tensor where this self
     // data will be copied to.
     size_t offset = compute_access_offset(slots, strides, self_dim);
@@ -150,7 +151,7 @@ void repeat_internal(
     slots[index] += incr[index];
     // If we have reached the limit in the innermost dimension, successively
     // increment the slot index of outer dimensions.
-    while (slots[index] == out.size(start + index)) {
+    while (slots[index] == static_cast<size_t>(out.size(start + index))) {
       if (index == 0) {
         break;
       }
@@ -226,7 +227,7 @@ Error repeat_tensor(
   // so we reset the upper bound of innermost dim to 1. 'in_incr' indicates
   // the size (in bytes) of the self data.
   int64_t limits[kTensorDimensionLimit];
-  for (size_t i = 0; i < self_dim; i++) {
+  for (ssize_t i = 0; i < self_dim; i++) {
     limits[i] = self_size[i];
   }
 

kernels/portable/cpu/util/targets.bzl

Lines changed: 0 additions & 2 deletions
@@ -61,7 +61,6 @@ def define_common_targets():
             "//executorch/runtime/core/exec_aten/util:scalar_type_util",
             "//executorch/runtime/core/exec_aten/util:tensor_util",
         ],
-        compiler_flags = ["-Wno-missing-prototypes"],
         visibility = ["//executorch/kernels/portable/cpu/..."],
     )
 
@@ -71,7 +70,6 @@ def define_common_targets():
         exported_headers = [
             "broadcast_util.h",
         ],
-        compiler_flags = ["-Wno-missing-prototypes"],
        deps = [
            ":repeat_util",
            "//executorch/runtime/kernel:kernel_includes",

runtime/core/data_loader.h

Lines changed: 6 additions & 6 deletions
@@ -69,12 +69,12 @@ class DataLoader {
     SegmentInfo() = default;
 
     explicit SegmentInfo(
-        Type segment_type,
-        size_t segment_index = 0,
-        const char* descriptor = nullptr)
-        : segment_type(segment_type),
-          segment_index(segment_index),
-          descriptor(descriptor) {}
+        Type segment_type_,
+        size_t segment_index_ = 0,
+        const char* descriptor_ = nullptr)
+        : segment_type(segment_type_),
+          segment_index(segment_index_),
+          descriptor(descriptor_) {}
   };
 
   virtual ~DataLoader() = default;
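The constructor parameters gain a trailing underscore so they no longer share a name with the members they initialize. Under shadowing warnings such as -Wshadow (or Clang's -Wshadow-field-in-constructor), a parameter named like a member shadows it inside the constructor body, which stricter builds treat as an error. A short sketch of the pattern with an illustrative type:

    #include <cstddef>

    struct SegmentInfoLike {  // illustrative, not the ExecuTorch type
      std::size_t segment_index = 0;

      // A parameter spelled `segment_index` would shadow the member above;
      // the trailing underscore keeps the two names distinct while the
      // member initializer stays readable.
      explicit SegmentInfoLike(std::size_t segment_index_)
          : segment_index(segment_index_) {}
    };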

runtime/core/exec_aten/util/dim_order_util.h

Lines changed: 5 additions & 5 deletions
@@ -22,7 +22,7 @@ namespace runtime {
 namespace {
 template <typename DimOrderType>
 bool validate_dim_order(const DimOrderType* dim_order, const size_t dims) {
-  for (int32_t i = 0; i < dims; ++i) {
+  for (size_t i = 0; i < dims; ++i) {
     if (dim_order[i] >= dims) {
       return false;
     }
@@ -42,7 +42,7 @@ template <typename DimOrderType>
 inline bool is_contiguous_dim_order(
     const DimOrderType* dim_order,
     const size_t dims) {
-  for (int i = 0; i < dims; ++i) {
+  for (size_t i = 0; i < dims; ++i) {
     if (dim_order[i] != i) {
       return false;
     }
@@ -74,7 +74,7 @@ bool is_channels_last_dim_order(
   if (dim_order[0] != 0) {
     return false;
   }
-  int d = 1;
+  size_t d = 1;
   while (d < dims - 1) {
     if (dim_order[d] != d + 1) {
       return false;
@@ -162,8 +162,8 @@ struct StrideDimOrder {
   StridesType stride;
   DimOrderType dim_order;
 
-  StrideDimOrder(StridesType stride, DimOrderType dim_order)
-      : stride(stride), dim_order(dim_order) {}
+  StrideDimOrder(StridesType stride_, DimOrderType dim_order_)
+      : stride(stride_), dim_order(dim_order_) {}
   StrideDimOrder() = default;
   bool operator>(const StrideDimOrder& other) const {
     // descending order

runtime/core/exec_aten/util/tensor_util.h

Lines changed: 11 additions & 11 deletions
@@ -582,7 +582,7 @@ inline bool tensors_have_same_dtype(
 
 inline bool tensor_is_rank(executorch::aten::Tensor t, size_t rank) {
   ET_CHECK_OR_RETURN_FALSE(
-      t.dim() == rank,
+      static_cast<size_t>(t.dim()) == rank,
       "Expected tensor.dim() to be %zu, but got %zu",
       static_cast<size_t>(rank),
       static_cast<size_t>(t.dim()));
@@ -594,7 +594,7 @@ inline bool tensor_has_rank_greater_or_equal_to(
     executorch::aten::Tensor t,
     size_t rank) {
   ET_CHECK_OR_RETURN_FALSE(
-      t.dim() >= rank,
+      static_cast<size_t>(t.dim()) >= rank,
       "Expected tensor.dim() to be >= %zu, but got %zu",
       static_cast<size_t>(rank),
       static_cast<size_t>(t.dim()));
@@ -606,7 +606,7 @@ inline bool tensor_has_rank_smaller_or_equal_to(
     executorch::aten::Tensor t,
     size_t rank) {
   ET_CHECK_OR_RETURN_FALSE(
-      t.dim() <= rank,
+      static_cast<size_t>(t.dim()) <= rank,
       "Expected tensor.dim() to be <= %zu, but got %zu",
       static_cast<size_t>(rank),
       static_cast<size_t>(t.dim()));
@@ -663,12 +663,12 @@ inline bool tensors_have_same_size_at_dims(
     executorch::aten::Tensor b,
     size_t dim_b) {
   ET_CHECK_OR_RETURN_FALSE(
-      dim_a < a.dim(),
+      dim_a < static_cast<size_t>(a.dim()),
       "Cannot retrieve dim %zu from tensor with dim %zu",
       static_cast<size_t>(dim_a),
       static_cast<size_t>(a.dim()));
   ET_CHECK_OR_RETURN_FALSE(
-      dim_b < b.dim(),
+      dim_b < static_cast<size_t>(b.dim()),
       "Cannot retrieve dim %zu from tensor with dim %zu",
       static_cast<size_t>(dim_b),
       static_cast<size_t>(b.dim()));
@@ -700,7 +700,7 @@ inline bool tensors_have_same_shape(
       static_cast<size_t>(b.numel()),
       static_cast<size_t>(a.dim()),
       static_cast<size_t>(b.dim()));
-  for (size_t d = 0; d < ET_MIN2(a.dim(), b.dim()); ++d) {
+  for (size_t d = 0; d < static_cast<size_t>(ET_MIN2(a.dim(), b.dim())); ++d) {
     ET_LOG(
         Error,
         " size(%zu): (%zu, %zu)",
@@ -737,7 +737,7 @@ inline bool tensors_have_same_shape(
       static_cast<size_t>(a.dim()),
       static_cast<size_t>(b.dim()),
       static_cast<size_t>(c.dim()));
-  for (size_t d = 0; d < ET_MIN3(a.dim(), b.dim(), c.dim()); ++d) {
+  for (size_t d = 0; d < static_cast<size_t>(ET_MIN3(a.dim(), b.dim(), c.dim())); ++d) {
     ET_LOG(
         Error,
         " size(%zu): (%zu, %zu, %zu)",
@@ -800,7 +800,7 @@ inline bool tensors_have_same_strides(
       ET_TENSOR_CHECK_PREFIX__ ": dim=(%zu, %zu)",
       static_cast<size_t>(a.dim()),
       static_cast<size_t>(b.dim()));
-  for (size_t d = 0; d < ET_MIN2(a.dim(), b.dim()); ++d) {
+  for (size_t d = 0; d < static_cast<size_t>(ET_MIN2(a.dim(), b.dim())); ++d) {
     ET_LOG(
         Error,
         " stride(%zu): (%zu, %zu)",
@@ -825,7 +825,7 @@ inline bool tensors_have_same_strides(
       static_cast<size_t>(a.dim()),
       static_cast<size_t>(b.dim()),
       static_cast<size_t>(c.dim()));
-  for (size_t d = 0; d < ET_MIN3(a.dim(), b.dim(), c.dim()); ++d) {
+  for (size_t d = 0; d < static_cast<size_t>(ET_MIN3(a.dim(), b.dim(), c.dim())); ++d) {
     ET_LOG(
         Error,
         " stride(%zu): (%zu, %zu, %zu)",
@@ -909,7 +909,7 @@ inline size_t getTrailingDims(
       dim,
       ssize_t(tensor.dim()));
   size_t dims = 1;
-  for (size_t i = dim + 1; i < tensor.dim(); ++i) {
+  for (size_t i = dim + 1; i < static_cast<size_t>(tensor.dim()); ++i) {
     dims *= static_cast<size_t>(tensor.size(i));
   }
   return dims;
@@ -982,7 +982,7 @@ inline void indexToCoordinate(
     const executorch::aten::Tensor& tensor,
     size_t index,
     size_t* coordinate) {
-  ET_CHECK(index < tensor.numel());
+  ET_CHECK(index < static_cast<size_t>(tensor.numel()));
   for (auto i = 0; i < tensor.dim(); ++i) {
     auto dim = tensor.dim() - 1 - i;
     size_t dim_size = tensor.size(dim);
