Commit 72b08d8

Move tensor_shape_to_c_string back to runtime/core
I moved it to kernels/portable due to build issues, but it doesn't make sense that kernels/portable code can't use this if it's in runtime/core, so let's dig in.

ghstack-source-id: 86ec3ad
ghstack-comment-id: 2641470188
Pull Request resolved: #8296
1 parent ac9d41d commit 72b08d8

16 files changed: +123 -99 lines changed

kernels/portable/cpu/util/broadcast_util.cpp

Lines changed: 1 addition & 1 deletion

@@ -7,9 +7,9 @@
  */
 
 #include <executorch/kernels/portable/cpu/util/repeat_util.h>
-#include <executorch/kernels/portable/cpu/util/tensor_util.h>
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
 #include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
+#include <executorch/runtime/core/exec_aten/util/tensor_shape_to_c_string.h>
 #include <string.h>
 
 namespace torch {

kernels/portable/cpu/util/targets.bzl

Lines changed: 1 addition & 12 deletions

@@ -73,8 +73,8 @@ def define_common_targets():
         compiler_flags = ["-Wno-missing-prototypes"],
         deps = [
             ":repeat_util",
-            ":tensor_util",
             "//executorch/runtime/kernel:kernel_includes",
+            "//executorch/runtime/core/exec_aten/util:tensor_shape_to_c_string",
             "//executorch/runtime/core/exec_aten/util:tensor_util",
         ],
         visibility = ["//executorch/kernels/portable/cpu/...", "//executorch/kernels/optimized/cpu/...", "@EXECUTORCH_CLIENTS"],
@@ -268,17 +268,6 @@ def define_common_targets():
         visibility = ["//executorch/kernels/portable/cpu/..."],
     )
 
-    runtime.cxx_library(
-        name = "tensor_util",
-        srcs = ["tensor_util.cpp"],
-        exported_headers = ["tensor_util.h"],
-        deps = [
-            "//executorch/runtime/core/exec_aten:lib",
-            "//executorch/runtime/kernel:kernel_includes",
-        ],
-        visibility = ["//executorch/kernels/portable/cpu/..."],
-    )
-
     runtime.cxx_library(
         name = "upsample_util",
         srcs = ["upsample_util.cpp"],

kernels/portable/cpu/util/test/CMakeLists.txt

Lines changed: 1 addition & 1 deletion

@@ -19,7 +19,7 @@ set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../../../../..)
 
 include(${EXECUTORCH_ROOT}/build/Test.cmake)
 
-set(_test_srcs broadcast_test.cpp reduce_test.cpp tensor_util_test.cpp)
+set(_test_srcs broadcast_test.cpp reduce_test.cpp)
 
 et_cxx_test(
   kernels_portable_cpu_util_test SOURCES ${_test_srcs} EXTRA_LIBS

kernels/portable/cpu/util/test/targets.bzl

Lines changed: 0 additions & 8 deletions

@@ -21,11 +21,3 @@ def define_common_targets():
             "//executorch/kernels/portable/cpu/util:reduce_util",
         ],
     )
-
-    runtime.cxx_test(
-        name = "tensor_util_test",
-        srcs = ["tensor_util_test.cpp"],
-        deps = [
-            "//executorch/kernels/portable/cpu/util:tensor_util",
-        ],
-    )

runtime/core/exec_aten/util/dim_order_util.h

Lines changed: 0 additions & 29 deletions

@@ -260,35 +260,6 @@ ET_NODISCARD inline Error stride_to_dim_order(
   return Error::Ok;
 }
 
-/**
- * Print a string representation of an ArrayRef of tensor sizes into a
- * user-provided string buffer. If the user buffer is too small, the string
- * will be truncated. The output is of the format (1,2,3,4).
- *
- * Note that we cannot use ArrayRef here due to a circular dependency (see
- * above comments).
- */
-template <class SizesType>
-inline void sizes_to_string(
-    char* output,
-    size_t output_size,
-    SizesType* sizes,
-    size_t rank) {
-  auto remaining_size = output_size;
-  for (auto i = 0; remaining_size > 0 && i < rank; i++) {
-    snprintf(
-        output,
-        remaining_size,
-        "%s%zd",
-        i == 0 ? "(" : ",",
-        static_cast<size_t>(sizes[i]));
-    auto len = strlen(output);
-    output += len;
-    remaining_size -= len;
-  }
-  snprintf(output, remaining_size, ")");
-}
-
 } // namespace runtime
 } // namespace executorch

runtime/core/exec_aten/util/targets.bzl

Lines changed: 23 additions & 0 deletions

@@ -72,3 +72,26 @@ def define_common_targets():
         # specify library directory path.
         force_static = True,
     )
+
+    runtime.cxx_library(
+        name = "tensor_shape_to_c_string" + aten_suffix,
+        srcs = ["tensor_shape_to_c_string.cpp"],
+        exported_deps = [
+            "//executorch/runtime/core:core",
+            "//executorch/runtime/core/exec_aten/util:tensor_dimension_limit",
+        ],
+        exported_headers = ["tensor_shape_to_c_string.h"],
+        visibility = [
+            "//executorch/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+    )
+
+    runtime.cxx_library(
+        name = "tensor_dimension_limit",
+        exported_headers = ["tensor_dimension_limit.h"],
+        visibility = [
+            "//executorch/...",
+            "@EXECUTORCH_CLIENTS",
+        ],
+    )

runtime/core/exec_aten/util/tensor_dimension_limit.h (new file)

Lines changed: 21 additions & 0 deletions

@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#pragma once
+
+namespace executorch::runtime {
+/**
+ * The expected output size may not be the existing size of any inputs and
+ * outputs if the operator supports both broadcast and dynamic shape.
+ * Therefore such operators needs extra space to store the calculated expected
+ * output size. such dynamic allocation is troublesome in executorch so we can
+ * just hard code a static value of a relatively small value because users
+ * don't create high dimensional tensors.
+ */
+constexpr size_t kTensorDimensionLimit = 16;
+} // namespace executorch::runtime
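
Note: the comment in this new header describes why operators that support both broadcasting and dynamic shapes need a statically sized scratch buffer for the computed output shape. The sketch below illustrates that pattern under stated assumptions; it is not code from this commit, and the function name and broadcasting logic are illustrative only.

#include <algorithm>
#include <cstddef>
#include <cstdint>

#include <executorch/runtime/core/exec_aten/util/tensor_dimension_limit.h>

using executorch::runtime::kTensorDimensionLimit;

// Illustrative sketch: compute a broadcast output shape into a stack buffer
// capped at kTensorDimensionLimit, so no dynamic allocation is needed even
// when the output shape matches neither input's existing size.
bool broadcast_sizes_example(
    const std::int32_t* a, std::size_t a_rank,
    const std::int32_t* b, std::size_t b_rank,
    std::int32_t (&out)[kTensorDimensionLimit], std::size_t& out_rank) {
  const std::size_t rank = std::max(a_rank, b_rank);
  if (rank > kTensorDimensionLimit) {
    return false; // Exceeds the static limit; real code would report an error.
  }
  for (std::size_t i = 0; i < rank; ++i) {
    // Align dimensions from the right, treating missing leading dims as 1.
    const std::int32_t da = i < rank - a_rank ? 1 : a[i - (rank - a_rank)];
    const std::int32_t db = i < rank - b_rank ? 1 : b[i - (rank - b_rank)];
    if (da != db && da != 1 && db != 1) {
      return false; // Not broadcastable.
    }
    out[i] = std::max(da, db);
  }
  out_rank = rank;
  return true;
}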

kernels/portable/cpu/util/tensor_util.cpp renamed to runtime/core/exec_aten/util/tensor_shape_to_c_string.cpp

Lines changed: 21 additions & 8 deletions

@@ -6,17 +6,19 @@
  * LICENSE file in the root directory of this source tree.
  */
 
-#include <executorch/kernels/portable/cpu/util/tensor_util.h>
-#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
+#include <executorch/runtime/core/exec_aten/util/tensor_shape_to_c_string.h>
+
 #include <executorch/runtime/platform/assert.h>
 
+#include <cinttypes>
+#include <cstdio> // For snprintf.
+#include <cstring>
+
 namespace executorch::runtime {
-/**
- * Shared implementation for tensor_util.h, may only contain code that
- * works whether or not ATen mode is active.
- */
-std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
-    executorch::runtime::Span<const executorch::aten::SizesType> shape) {
+namespace {
+template <typename SizesType>
+std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string_impl(
+    executorch::runtime::Span<SizesType> shape) {
   std::array<char, kTensorShapeStringSizeLimit> out;
   char* p = out.data();
   if ET_UNLIKELY (shape.size() > kTensorDimensionLimit) {
@@ -48,5 +50,16 @@ std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
   *(p - 1) = '\0';
   return out;
 }
+} // namespace
+
+std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
+    executorch::runtime::Span<const std::int32_t> shape) {
+  return tensor_shape_to_c_string_impl(shape);
+}
+
+std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
+    executorch::runtime::Span<const std::int64_t> shape) {
+  return tensor_shape_to_c_string_impl(shape);
+}
 
 } // namespace executorch::runtime

kernels/portable/cpu/util/tensor_util.h renamed to runtime/core/exec_aten/util/tensor_shape_to_c_string.h

Lines changed: 23 additions & 6 deletions

@@ -10,10 +10,10 @@
 
 #include <array>
 #include <cstddef>
+#include <cstdint>
 #include <limits>
 
-#include <executorch/runtime/core/exec_aten/exec_aten.h>
-#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
+#include <executorch/runtime/core/exec_aten/util/tensor_dimension_limit.h>
 #include <executorch/runtime/core/span.h>
 
 namespace executorch::runtime {
@@ -34,18 +34,35 @@ constexpr size_t kTensorShapeStringSizeLimit = 1 + /* opening parenthesis */
 
 namespace internal {
 constexpr size_t kMaximumPrintableTensorShapeElement =
-    std::is_same_v<executorch::aten::SizesType, int32_t>
-    ? std::numeric_limits<int32_t>::max()
-    : std::numeric_limits<uint32_t>::max();
+    std::numeric_limits<int32_t>::max();
 } // namespace internal
 
 /**
  * Convert a shape to a NUL-terminated C string with limited size. If
  * elements of the shape are larger than
  * kMaximumPrintableTensorShapeElement, those elements will be
  * rendered as ERR instead.
+ *
+ * NOTE: There are two overloads of this function to support both ATen
+ * tensors and ExecuTorch Tensors, which have different SizesType,
+ * while also avoiding a dependency on exec_aten.h from this header
+ * because that would cause a circular dependency.
+ */
+std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
+    executorch::runtime::Span<const std::int32_t> shape);
+
+/**
+ * Convert a shape to a NUL-terminated C string with limited size. If
+ * elements of the shape are larger than
+ * kMaximumPrintableTensorShapeElement, those elements will be
+ * rendered as ERR instead.
+ *
+ * NOTE: There are two overloads of this function to support both ATen
+ * tensors and ExecuTorch Tensors, which have different SizesType,
+ * while also avoiding a dependency on exec_aten.h from this header
+ * because that would cause a circular dependency.
  */
 std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
-    executorch::runtime::Span<const executorch::aten::SizesType> shape);
+    executorch::runtime::Span<const std::int64_t> shape);
 
 } // namespace executorch::runtime
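
A minimal usage sketch of the relocated API follows, assuming Span's pointer/length constructor and 32-bit ExecuTorch sizes; the shape values and the exact rendering of the separator are illustrative, not taken from this commit.

#include <cstdint>
#include <cstdio>

#include <executorch/runtime/core/exec_aten/util/tensor_shape_to_c_string.h>
#include <executorch/runtime/core/span.h>

int main() {
  // ExecuTorch core tensors use 32-bit sizes; an ATen caller would pass
  // int64_t sizes and resolve to the other overload.
  const std::int32_t sizes[] = {2, 3, 4};
  const auto shape_str = executorch::runtime::tensor_shape_to_c_string(
      executorch::runtime::Span<const std::int32_t>(sizes, 3));
  // Unlike the removed sizes_to_string, no caller-owned buffer is needed:
  // the result is a fixed-size, NUL-terminated std::array.
  std::printf("shape: %s\n", shape_str.data()); // e.g. "shape: (2, 3, 4)"
  return 0;
}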

runtime/core/exec_aten/util/tensor_util.h

Lines changed: 1 addition & 10 deletions

@@ -20,6 +20,7 @@
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
 #include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
 #include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
+#include <executorch/runtime/core/exec_aten/util/tensor_dimension_limit.h>
 #include <executorch/runtime/core/span.h>
 #include <executorch/runtime/platform/assert.h>
 #include <executorch/runtime/platform/compiler.h>
@@ -893,16 +894,6 @@ inline bool tensor_is_scalar(executorch::aten::Tensor t) {
   return t.dim() == 0 && t.numel() == 1;
 }
 
-/**
- * The expected output size may not be the existing size of any inputs and
- * outputs if the operator supports both broadcast and dynamic shape.
- * Therefore such operators needs extra space to store the calculated expected
- * output size. such dynamic allocation is troublesome in executorch so we can
- * just hard code a static value of a relatively small value because users
- * don't create high dimensional tensors.
- */
-constexpr size_t kTensorDimensionLimit = 16;
-
 /// Returns the product of dim[0:dim), not including dim.
 inline size_t getLeadingDims(
     const executorch::aten::Tensor& tensor,
