Skip to content

Commit 7e3ec96

Browse files
dbort and facebook-github-bot
authored and committed
Remove references to exec_aten::RuntimeContext (#5257)
Summary: Pull Request resolved: #5257 RuntimeContext should never have been in that namespace since it's not an ATen type. There are still other internal users outside of //executorch, but the important thing right now is that we set good examples for OSS users. Reviewed By: manuelcandales Differential Revision: D62478758 fbshipit-source-id: 2c0c753a5ef7a766fdde4dd796c12eedce312868
1 parent de30572 commit 7e3ec96

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

42 files changed

+52
-51
lines changed

extension/llm/custom_ops/op_sdpa_aot.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ Tensor& sdpa_with_kv_cache_out_no_context(
3333
// @lint-ignore CLANGTIDY facebook-hte-ParameterMightThrowOnCopy
3434
const optional<double> scale,
3535
Tensor& output) {
36-
exec_aten::RuntimeContext context{};
36+
executorch::runtime::KernelRuntimeContext context{};
3737
return torch::executor::native::sdpa_with_kv_cache_out(
3838
context,
3939
q_projected,

extension/llm/custom_ops/op_sdpa_test.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ exec_aten::Tensor op_scaled_dot_product_attention(
2828
bool is_causal,
2929
exec_aten::optional<double> scale,
3030
exec_aten::Tensor& out) {
31-
exec_aten::RuntimeContext context{};
31+
executorch::runtime::KernelRuntimeContext context{};
3232
return torch::executor::native::flash_attention_kernel_out(
3333
context, query, key, value, attn_mask, dropout_p, is_causal, scale, out);
3434
}

extension/llm/custom_ops/op_sdpa_with_kv_cache_test.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ exec_aten::Tensor op_sdpa_with_kv_cache(
3131
bool is_causal,
3232
exec_aten::optional<double> scale,
3333
exec_aten::Tensor& out) {
34-
exec_aten::RuntimeContext context{};
34+
executorch::runtime::KernelRuntimeContext context{};
3535
return torch::executor::native::sdpa_with_kv_cache_out(
3636
context,
3737
query,

kernels/aten/cpu/op__to_dim_order_copy.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ Tensor& _to_dim_order_copy_out(
115115
bool non_blocking,
116116
OptionalArrayRef<int64_t> dim_order,
117117
Tensor& out) {
118-
exec_aten::RuntimeContext ctx{};
118+
executorch::runtime::KernelRuntimeContext ctx{};
119119
return _to_dim_order_copy_out(ctx, self, non_blocking, dim_order, out);
120120
}
121121

kernels/optimized/cpu/op_gelu.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ namespace {
3838
*/
3939
template <typename CTYPE>
4040
void gelu(
41-
exec_aten::RuntimeContext& context,
41+
executorch::runtime::KernelRuntimeContext& context,
4242
const Tensor& input,
4343
string_view approximate,
4444
Tensor& output) {

kernels/portable/cpu/op__to_dim_order_copy.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ Tensor& _to_dim_order_copy_out(
118118
bool non_blocking,
119119
OptionalArrayRef<int64_t> dim_order,
120120
Tensor& out) {
121-
exec_aten::RuntimeContext context{};
121+
executorch::runtime::KernelRuntimeContext context{};
122122
return _to_dim_order_copy_out(context, self, non_blocking, dim_order, out);
123123
}
124124

kernels/portable/test/op_gelu_test.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ using torch::executor::testing::TensorFactory;
2525
// executorch/kernels/test/op_gelu_test.cpp instead.
2626

2727
Tensor& op_gelu_out(const Tensor& self, string_view approximate, Tensor& out) {
28-
exec_aten::RuntimeContext context{};
28+
executorch::runtime::KernelRuntimeContext context{};
2929
return torch::executor::native::gelu_out(context, self, approximate, out);
3030
}
3131

kernels/quantized/test/op_add_test.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,10 +21,10 @@
2121
using namespace ::testing;
2222
using exec_aten::ArrayRef;
2323
using exec_aten::optional;
24-
using exec_aten::RuntimeContext;
2524
using exec_aten::Scalar;
2625
using exec_aten::ScalarType;
2726
using exec_aten::Tensor;
27+
using executorch::runtime::KernelRuntimeContext;
2828
using torch::executor::native::add_out;
2929
using torch::executor::native::dequantize_per_tensor_out;
3030
using torch::executor::native::quantize_per_tensor_out;
@@ -193,7 +193,7 @@ TEST(OpQuantizeAddTest, ConsitencyWithReferencePattern) {
193193

194194
optional<ScalarType> out_dtype = optional<ScalarType>();
195195

196-
RuntimeContext context{};
196+
KernelRuntimeContext context{};
197197
// q -> qadd -> dq
198198
// 3.5 / 0.5 + 1 = 8
199199
quantize_per_tensor_out(

kernels/quantized/test/op_embedding4b_test.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,9 @@
1919
using namespace ::testing;
2020
using exec_aten::ArrayRef;
2121
using exec_aten::optional;
22-
using exec_aten::RuntimeContext;
2322
using exec_aten::ScalarType;
2423
using exec_aten::Tensor;
24+
using executorch::runtime::KernelRuntimeContext;
2525
using torch::executor::native::quantized_embedding_4bit_out;
2626

2727
using torch::executor::testing::TensorFactory;
@@ -62,7 +62,7 @@ TEST(OpQuantizedEmbedding4bTest, TestGroupWiseQuantizedEmbedding) {
6262
EXPECT_TENSOR_EQ(out, expected);
6363

6464
out = tf.zeros({3, 4});
65-
auto context = RuntimeContext();
65+
auto context = KernelRuntimeContext();
6666
torch::executor::native::quantized_embedding_4bit_out(
6767
context,
6868
qweight,

kernels/quantized/test/op_embedding_test.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,10 +21,10 @@
2121
using namespace ::testing;
2222
using exec_aten::ArrayRef;
2323
using exec_aten::optional;
24-
using exec_aten::RuntimeContext;
2524
using exec_aten::Scalar;
2625
using exec_aten::ScalarType;
2726
using exec_aten::Tensor;
27+
using executorch::runtime::KernelRuntimeContext;
2828
using torch::executor::native::dequantize_per_tensor_out;
2929
using torch::executor::native::embedding_out;
3030
using torch::executor::native::quantize_per_tensor_out;
@@ -120,7 +120,7 @@ TEST(OpQuantizedEmbeddingTest, ConsitencyWithReferencePattern) {
120120

121121
TensorFactory<ScalarType::Byte> tfo;
122122
Tensor qweight = tfo.zeros({3, 1});
123-
RuntimeContext context{};
123+
KernelRuntimeContext context{};
124124
// 3.5 / 0.5 + 1 = 8
125125
// 5.5 / 0.5 + 1 = 12
126126
// 1 / 0.5 + 1 = 3

0 commit comments

Comments (0)