Address various warnings as errors #8581

Merged · 1 commit · Mar 3, 2025
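The recurring fix in this PR is the c10::irange loop idiom: raw int32_t or size_t index loops compared against bounds of the opposite signedness trip -Wsign-compare once warnings are promoted to errors. A minimal standalone sketch of the idiom, illustrative only and not code from this PR:

#include <c10/util/irange.h>

#include <cstdint>
#include <vector>

int64_t sum(const std::vector<int64_t>& values) {
  int64_t total = 0;
  // Before: for (int32_t i = 0; i < values.size(); ++i) compares a signed
  // index against the unsigned size_t bound, which -Werror=sign-compare
  // rejects. c10::irange(n) deduces the index type from the bound itself,
  // so the loop contains no mixed-sign comparison at all.
  for (const auto i : c10::irange(values.size())) {
    total += values[i];
  }
  return total;
}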
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 4 additions & 3 deletions extension/threadpool/cpuinfo_utils.cpp
@@ -6,6 +6,7 @@
* LICENSE file in the root directory of this source tree.
*/

+ #include <c10/util/irange.h>
#include <executorch/extension/threadpool/cpuinfo_utils.h>

#include <fstream>
@@ -84,7 +85,7 @@ bool populate_available_cpu_mids() {
cpu_midrs->resize(num_possible_cores);
const std::string kMidrFilePathPrefix = "/sys/devices/system/cpu/cpu";
const std::string kMidrFilePathSuffix = "/regs/identification/midr_el1";
- for (int32_t i = 0; i < num_possible_cores; ++i) {
+ for (const auto i : c10::irange(num_possible_cores)) {
std::string midr_file_path =
kMidrFilePathPrefix + std::to_string(i) + kMidrFilePathSuffix;
ET_LOG(Info, "Reading file %s", midr_file_path.c_str());
@@ -115,7 +116,7 @@ uint32_t _get_num_performant_cores() {
ET_LOG(Info, "CPU info and manual query on # of cpus dont match.");
return 0;
}
- for (int32_t i = 0; i < cpu_midrs->size(); ++i) {
+ for (const auto i : c10::irange(cpu_midrs->size())) {
uint32_t masked_midr = (*cpu_midrs)[i] & RIVISION_MASK;
switch (masked_midr) {
case CPUINFO_ARM_MIDR_CORTEX_A520:
@@ -148,7 +149,7 @@ uint32_t get_num_performant_cores() {
uint32_t num_possible_cores = cpuinfo_get_processors_count();
uint32_t num_non_performant_core = 0;
if (uarch_count > 1) {
- for (int32_t i = 0; i < uarch_count; ++i) {
+ for (const auto i : c10::irange(uarch_count)) {
const struct cpuinfo_uarch_info* uarch_info = cpuinfo_get_uarch(i);
if (is_non_performant_core(uarch_info)) {
num_non_performant_core += uarch_info->processor_count;
1 change: 1 addition & 0 deletions extension/threadpool/targets.bzl
@@ -23,6 +23,7 @@ def define_common_targets():
srcs = _THREADPOOL_SRCS,
deps = [
"//executorch/runtime/core:core",
"//executorch/runtime/core/portable_type/c10/c10:c10",
],
exported_headers = _THREADPOOL_HEADERS,
exported_deps = [
6 changes: 4 additions & 2 deletions kernels/portable/cpu/op__to_dim_order_copy.cpp
@@ -6,6 +6,8 @@
* LICENSE file in the root directory of this source tree.
*/

+ #include <c10/util/irange.h>
+
#include <executorch/kernels/portable/cpu/scalar_utils.h>
#include <executorch/kernels/portable/cpu/util/copy_ops_util.h>
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
@@ -41,7 +43,7 @@ int64_t coordinateToIndexWithDimOrder(

dim_order_to_stride_nocheck(
sizes.data(), dim_order.data(), sizes.size(), strides);
- for (size_t i = 0; i < self.dim(); ++i) {
+ for (const auto i : c10::irange(self.dim())) {
index += cur_indices[i] * strides[i];
}
return index;
@@ -59,7 +61,7 @@ void _to_dim_order_copy_impl(const Tensor& self, Tensor& out) {
for (ssize_t i = 0; i < self.numel(); i++) {
// Update the current indices.
for (ssize_t j = self.dim() - 1; j >= 0; j--) {
- if (coordinate[j] + 1 < self.size(j)) {
+ if (coordinate[j] + 1 < static_cast<size_t>(self.size(j))) {
coordinate[j]++;
break;
} else {
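The static_cast<size_t> in the coordinate update above is the other recurring fix in this PR: coordinate[j] is unsigned while Tensor::size() returns a signed value, and comparing the two directly is a sign-compare error. A minimal sketch of the pattern, with a hypothetical stand-in for the tensor accessor rather than this file's real declarations:

#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for Tensor::size(dim), which returns a signed type.
int64_t dim_size(int64_t /*dim*/) {
  return 4;
}

bool can_increment(const size_t* coordinate, int64_t j) {
  // Casting the (known non-negative) dimension size to size_t keeps both
  // operands of the comparison unsigned, so -Wsign-compare stays quiet.
  return coordinate[j] + 1 < static_cast<size_t>(dim_size(j));
}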
3 changes: 2 additions & 1 deletion kernels/portable/cpu/op_amax.cpp
@@ -6,6 +6,7 @@
* LICENSE file in the root directory of this source tree.
*/

+ #include <c10/util/irange.h>
#include <cmath>

#include <executorch/kernels/portable/cpu/util/reduce_util.h>
@@ -44,7 +45,7 @@ Tensor& amax_out(

ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "amax.out", CTYPE, [&]() {
CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
- for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+ for (const auto out_ix : c10::irange(out.numel())) {
out_data[out_ix] = reduce_over_dim_list<CTYPE>(
[](CTYPE v, CTYPE max_v) {
return std::isnan(v) || v > max_v ? v : max_v;
4 changes: 2 additions & 2 deletions kernels/portable/cpu/op_amin.cpp
@@ -5,7 +5,7 @@
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
-
+ #include <c10/util/irange.h>
#include <cmath>

#include <executorch/kernels/portable/cpu/util/reduce_util.h>
@@ -44,7 +44,7 @@ Tensor& amin_out(

ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "amin.out", CTYPE, [&]() {
CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
- for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+ for (const auto out_ix : c10::irange(out.numel())) {
out_data[out_ix] = reduce_over_dim_list<CTYPE>(
[](CTYPE v, CTYPE min_v) {
return std::isnan(v) || v < min_v ? v : min_v;
3 changes: 2 additions & 1 deletion kernels/portable/cpu/op_argmax.cpp
@@ -6,6 +6,7 @@
* LICENSE file in the root directory of this source tree.
*/

+ #include <c10/util/irange.h>
#include <cmath>
#include <tuple>

@@ -46,7 +47,7 @@ Tensor& argmax_out(
ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "argmax.out", CTYPE, [&] {
long* out_data = out.mutable_data_ptr<long>();

- for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+ for (const auto out_ix : c10::irange(out.numel())) {
std::tuple<CTYPE, long> acc = reduce_over_dim<CTYPE>(
[](CTYPE v, long ix, CTYPE acc_val, long acc_ix) {
if (!std::isnan(acc_val) && (std::isnan(v) || v > acc_val)) {
3 changes: 2 additions & 1 deletion kernels/portable/cpu/op_argmin.cpp
@@ -6,6 +6,7 @@
* LICENSE file in the root directory of this source tree.
*/

+ #include <c10/util/irange.h>
#include <cmath>
#include <tuple>

@@ -46,7 +47,7 @@ Tensor& argmin_out(
ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "argmin.out", CTYPE, [&] {
long* out_data = out.mutable_data_ptr<long>();

- for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+ for (const auto out_ix : c10::irange(out.numel())) {
std::tuple<CTYPE, long> acc = reduce_over_dim<CTYPE>(
[](CTYPE v, long ix, CTYPE acc_val, long acc_ix) {
if (!std::isnan(acc_val) && (std::isnan(v) || v < acc_val)) {
3 changes: 2 additions & 1 deletion kernels/portable/cpu/op_expand_copy.cpp
@@ -96,7 +96,8 @@ Tensor& expand_copy_out(

ET_KERNEL_CHECK(
ctx,
- repeat_tensor(self, {repeats, repeats_size}, out) == Error::Ok,
+ repeat_tensor(self, makeArrayRef(repeats, repeats_size), out) ==
+ Error::Ok,
InvalidArgument,
out);

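This change swaps the braced pair {repeats, repeats_size} for an explicit makeArrayRef(repeats, repeats_size) call. Both spell a pointer-plus-length ArrayRef, but the named helper makes the construction unambiguous and avoids the diagnostics that braced conversions can draw under stricter flags. A simplified sketch with a toy ArrayRef, assumed semantics rather than ExecuTorch's actual class:

#include <cstddef>
#include <cstdint>

// Toy pointer+length view in the spirit of ArrayRef.
template <typename T>
struct ArrayRef {
  const T* data;
  size_t length;
  ArrayRef(const T* d, size_t n) : data(d), length(n) {}
};

// Mirrors makeArrayRef: deduces T and names the construction explicitly.
template <typename T>
ArrayRef<T> makeArrayRef(const T* d, size_t n) {
  return ArrayRef<T>(d, n);
}

int64_t first_repeat(const int64_t* repeats, size_t repeats_size) {
  ArrayRef<int64_t> view = makeArrayRef(repeats, repeats_size);
  return view.length > 0 ? view.data[0] : 0;
}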
2 changes: 1 addition & 1 deletion kernels/portable/cpu/util/activation_ops_util.cpp
@@ -31,7 +31,7 @@ bool check_glu_args(const Tensor& in, int64_t dim, Tensor& out) {
ET_LOG_AND_RETURN_IF_FALSE(tensor_is_floating_type(in));

const size_t non_negative_dim = dim < 0 ? dim + in.dim() : dim;
- const size_t dim_size = in.size(non_negative_dim);
+ const ssize_t dim_size = in.size(non_negative_dim);

ET_CHECK_OR_RETURN_FALSE(
dim_size % 2 == 0,
6 changes: 4 additions & 2 deletions kernels/portable/cpu/util/broadcast_util.cpp
@@ -6,6 +6,7 @@
* LICENSE file in the root directory of this source tree.
*/

+ #include <executorch/kernels/portable/cpu/util/broadcast_util.h>
#include <executorch/kernels/portable/cpu/util/repeat_util.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
@@ -274,7 +275,7 @@ void delinearize_index(
size_t* out_indexes,
const size_t out_indexes_len) {
ET_CHECK(shape.size() <= out_indexes_len);
- for (auto i = 0; i < shape.size(); ++i) {
+ for (size_t i = 0; i < shape.size(); ++i) {
auto dim = shape.size() - 1 - i;
auto dim_size = shape[dim];
out_indexes[dim] = linear_index % dim_size;
@@ -304,7 +305,8 @@ size_t linearize_access_indexes(
size_t linear_index = 0;
for (size_t i = 0; i < indexes_broadcast_from.size(); ++i) {
// If this dimension is broadcasted, add zero to the linear address.
- if (indexes_broadcast_from[i] >= broadcast_from_shape[i]) {
+ if (indexes_broadcast_from[i] >=
+ static_cast<size_t>(broadcast_from_shape[i])) {
ET_CHECK_MSG(
broadcast_from_shape[i] == 1,
"Expected dim size == 1 if broadcasted, but actual dim size is %zu",
7 changes: 4 additions & 3 deletions kernels/portable/cpu/util/copy_ops_util.h
@@ -7,6 +7,7 @@
*/

#pragma once
+ #include <c10/util/irange.h>

#include <executorch/runtime/kernel/kernel_includes.h>

@@ -26,16 +27,16 @@ void _as_strided_copy(
ArrayRef<int64_t> stride,
int64_t dim) {
// the last dimension, copy data
- if (dim == size.size() - 1) {
- for (size_t i = 0; i < size.at(dim); ++i) {
+ if (dim == static_cast<int64_t>(size.size()) - 1) {
+ for (const auto i : c10::irange(size.at(dim))) {
output_data[i] = *input_data;
input_data += stride.at(dim);
}
return;
}
size_t trailing_dims = getTrailingDims(out, dim);
// recursively set data for the next dimension
- for (size_t i = 0; i < size.at(dim); ++i) {
+ for ([[maybe_unused]] const auto i : c10::irange(size.at(dim))) {
_as_strided_copy<CTYPE>(
input_data, output_data, out, size, stride, dim + 1);
input_data += stride.at(dim);
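The second loop in this hunk never reads its index; it only repeats the recursive call size.at(dim) times. Without [[maybe_unused]], the unused range-for binding would trip -Wunused-variable under -Werror. A tiny sketch of the annotation:

#include <c10/util/irange.h>

#include <cstdint>
#include <cstdio>

void print_n_times(const char* msg, int64_t n) {
  // i exists only to drive the repetition count; [[maybe_unused]] marks
  // the lack of any read as intentional.
  for ([[maybe_unused]] const auto i : c10::irange(n)) {
    std::puts(msg);
  }
}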
8 changes: 5 additions & 3 deletions kernels/portable/cpu/util/functional_util.h
@@ -8,6 +8,8 @@

#pragma once

+ #include <c10/util/irange.h>
+
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>

@@ -30,7 +32,7 @@ inline CTYPE apply_unary_reduce_fn(
const int64_t size,
const int64_t stride = 1) {
CTYPE acc_val = data_in[0];
- for (size_t i = 1; i < size; i++) {
+ for (const auto i : c10::irange(1, size)) {
acc_val = reduce_fun(data_in[i * stride], acc_val);
}
return acc_val;
@@ -51,7 +53,7 @@ inline void apply_unary_map_fn(
CTYPE_OUT* const data_out,
const int64_t size,
const int64_t stride = 1) {
- for (size_t i = 0; i < size; i++) {
+ for (const auto i : c10::irange(size)) {
data_out[i * stride] = map_fun(data_in[i * stride]);
}
}
@@ -77,7 +79,7 @@ inline CTYPE_OUT apply_unary_map_reduce_fn(
const int64_t size,
const int64_t stride = 1) {
CTYPE_OUT acc_val = map_fun(data_in[0]);
- for (size_t i = 1; i < size; ++i) {
+ for (const auto i : c10::irange(1, size)) {
acc_val = reduce_fun(map_fun(data_in[i * stride]), acc_val);
}
return acc_val;
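Note the two-argument form c10::irange(1, size) in the first and third hunks: each reduction seeds its accumulator with element 0 and then folds in elements 1 through size - 1. A minimal sketch of the same shape, a hypothetical strided max-reduction rather than the actual templates above:

#include <c10/util/irange.h>

#include <cstdint>

float strided_max(const float* data, int64_t size, int64_t stride = 1) {
  float acc = data[0];
  // irange(1, size) iterates i = 1, 2, ..., size - 1.
  for (const auto i : c10::irange(1, size)) {
    const float v = data[i * stride];
    if (v > acc) {
      acc = v;
    }
  }
  return acc;
}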
19 changes: 10 additions & 9 deletions kernels/portable/cpu/util/reduce_util.cpp
@@ -48,8 +48,7 @@ ET_NODISCARD bool check_dim_list_is_valid(
}

const size_t non_neg_d = _normalize_non_neg_d(d, in.dim());
- ET_LOG_AND_RETURN_IF_FALSE(
- non_neg_d < kTensorDimensionLimit && non_neg_d >= 0);
+ ET_LOG_AND_RETURN_IF_FALSE(non_neg_d < kTensorDimensionLimit);

ET_CHECK_OR_RETURN_FALSE(
dim_exist[non_neg_d] == false,
@@ -86,7 +85,7 @@ size_t get_reduced_dim_product(
}
size_t dim_product = 1;
if (!dim.has_value()) {
- for (size_t i = 0; i < in.dim(); ++i) {
+ for (size_t i = 0; i < static_cast<size_t>(in.dim()); ++i) {
dim_product *= in.size(i);
}
return dim_product;
@@ -108,7 +107,7 @@ size_t get_reduced_dim_product(
size_t dim_product = 1;
const size_t in_dim = in.dim();
if (!dim_list.has_value() || dim_list.value().size() == 0) {
- for (size_t i = 0; i < in.dim(); ++i) {
+ for (size_t i = 0; i < static_cast<size_t>(in.dim()); ++i) {
dim_product *= in.size(i);
}
return dim_product;
@@ -136,7 +135,7 @@ size_t get_out_numel(
ET_CHECK_VALID_DIM(dim_val, in.dim());
}
const size_t non_neg_dim = _normalize_non_neg_d(dim_val, in.dim());
- for (size_t d = 0; d < in.dim(); ++d) {
+ for (size_t d = 0; d < static_cast<size_t>(in.dim()); ++d) {
if (d != non_neg_dim) {
out_numel *= in.size(d);
}
@@ -155,7 +154,7 @@ size_t get_out_numel(
dim_list) {
size_t out_numel = 1;
if (dim_list.has_value() && dim_list.value().size() != 0) {
- for (size_t d = 0; d < in.dim(); ++d) {
+ for (size_t d = 0; d < static_cast<size_t>(in.dim()); ++d) {
if (!check_dim_in_dim_list(d, in.dim(), dim_list.value())) {
out_numel *= in.size(d);
}
@@ -234,7 +233,7 @@ size_t compute_reduced_out_size(
if (dim.has_value()) {
const auto dim_val = dim.value();
const size_t non_neg_dim = _normalize_non_neg_d(dim_val, in_dim);
- for (ssize_t i = 0; i < non_neg_dim; ++i) {
+ for (size_t i = 0; i < non_neg_dim; ++i) {
sizes_arr[i] = in.size(i);
}
if (keepdim) {
@@ -250,7 +249,7 @@
}
} else {
if (keepdim) {
- for (size_t i = 0; i < in_dim; ++i) {
+ for (size_t i = 0; i < static_cast<size_t>(in_dim); ++i) {
sizes_arr[i] = 1;
}
} else {
@@ -266,7 +265,9 @@ size_t compute_reduced_out_size(
dim_list,
bool keepdim,
executorch::aten::SizesType* sizes_arr) {
- const auto in_dim = in.dim();
+ // check_dim_in_dim_list and later comparisons
+ // expect in_dim to be size_t, so cast it here
+ const size_t in_dim = static_cast<size_t>(in.dim());
size_t out_dim = in_dim;

if (dim_list.has_value() && dim_list.value().size() != 0) {
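The first hunk in this file drops a tautology rather than a sign mismatch: non_neg_d is a size_t, so non_neg_d >= 0 is always true, and compilers reject the comparison under -Werror (GCC's -Wtype-limits, clang's tautological-compare family). A small sketch:

#include <cstddef>

bool dim_is_valid(size_t non_neg_d, size_t limit) {
  // non_neg_d >= 0 would always hold for an unsigned type and is flagged
  // as a tautological comparison, so only the upper bound is checked.
  return non_neg_d < limit;
}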
4 changes: 2 additions & 2 deletions kernels/portable/cpu/util/reduce_util.h
@@ -50,15 +50,15 @@ void apply_on_flat_ix_with_dim_mask_and_base(
const size_t start,
const size_t end) {
// Compute innermost dim from dim list
- size_t inner_dim = in.dim() - 1;
+ int64_t inner_dim = in.dim() - 1;
while (!dim_mask[inner_dim]) {
inner_dim--;
}

// Initialize array of indices per dimension. This array is used to maintain
// the per-dimension index of the element in `in` that is being reduced over
// Only the dims that are in the dim list are relevant.
- size_t dim_index[kTensorDimensionLimit];
+ int64_t dim_index[kTensorDimensionLimit];
for (int64_t d = 0; d < in.dim(); d++) {
dim_index[d] = 0;
}
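The size_t to int64_t switch here matters beyond the warning itself: inner_dim is found by scanning downward, and decrementing an unsigned index past 0 wraps to a huge value instead of going negative. A sketch of why a signed index suits a descending walk, using a hypothetical mask scan of the same shape as the code above:

#include <cstdint>

// Scan backwards for the innermost dimension whose mask bit is set.
// With a size_t index, d-- past 0 would wrap around rather than
// terminate, so a signed index is the safer spelling here.
int64_t innermost_masked_dim(const bool* dim_mask, int64_t ndim) {
  int64_t d = ndim - 1;
  while (d >= 0 && !dim_mask[d]) {
    --d;
  }
  return d;  // -1 when no dimension is masked
}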