
Commit 734d8d6

zonglinpeng authored and facebook-github-bot committed
patch op name discrepancy in oss
Summary:

1. A few operators are renamed:
   - quantized_relu_out --> quantized_relu_per_tensor_out
   - where_out --> where_self_out
   - mean_dim_out --> mean
   - softmax_out --> _softmax_out
   - clamp_tensor_out --> clamp_Tensor_out
   The corresponding yaml file (functions_hifi.yaml) needs to be updated to match.
2. The quantized kernel sources are renamed from quantized_linear_out.cpp, quantized_layer_norm.cpp, quantized_fully_connected_out.cpp, quantize_per_tensor.cpp, quantized_relu_out.cpp, and dequantize_per_tensor.cpp to op_quantized_linear_out.cpp, op_quantized_layer_norm.cpp, op_quantized_fully_connected_out.cpp, op_quantize_per_tensor.cpp, op_quantized_relu_out.cpp, and op_dequantize_per_tensor.cpp. CMakeLists.txt is updated to reference the new file names.
3. The yaml file is updated so the quantized kernels use the per_tensor_out variants.

Differential Revision: D69568677
1 parent 433e30b commit 734d8d6
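Note: the renames in point 1 are bridged in the operator files below by keeping the old name as a thin wrapper that forwards to the renamed kernel. A minimal, self-contained sketch of that pattern with stand-in types and a toy body (the real wrappers take executorch Tensors and a runtime context, as in op_clamp.cpp, op_mean.cpp, and op_softmax.cpp further down):

// Illustrative sketch only; types and the body are stand-ins, not the HiFi kernels.
#include <cstdint>
#include <iostream>

// Renamed (canonical) entry point.
int64_t quantized_relu_per_tensor(int64_t x, int64_t zero_point) {
  return x > zero_point ? x : zero_point;  // toy ReLU around a zero point
}

// Old name kept as a thin wrapper so existing call sites and registrations
// still resolve to the same implementation.
int64_t quantized_relu(int64_t x, int64_t zero_point) {
  return quantized_relu_per_tensor(x, zero_point);
}

int main() {
  std::cout << quantized_relu(-3, 0) << std::endl;  // prints 0
}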

6 files changed: 82 additions, 2 deletions

backends/cadence/aot/functions_hifi.yaml

Lines changed: 10 additions & 0 deletions
@@ -204,11 +204,21 @@
     - arg_meta: null
       kernel_name: cadence::impl::HiFi::quantized_linear_out
 
+- func: cadence::quantized_linear.per_tensor_out(Tensor src, Tensor weight, Tensor bias, SymInt src_zero_point, SymInt weight_zero_point, SymInt out_multiplier, SymInt out_shift, SymInt out_zero_point, Tensor? offset, *, Tensor(a!) out) -> Tensor(a!)
+  kernels:
+    - arg_meta: null
+      kernel_name: cadence::impl::HiFi::quantized_linear_per_tensor_out
+
 - func: cadence::quantized_relu.out(Tensor X, Tensor X_zero_point, int out_zero_point, Tensor out_multiplier, Tensor out_shift, *, Tensor(a!) out) -> Tensor(a!)
   kernels:
     - arg_meta: null
       kernel_name: cadence::impl::HiFi::quantized_relu_out
 
+- func: cadence::quantized_relu.per_tensor_out(Tensor X, int X_zero_point, int out_zero_point, int out_multiplier, int out_shift, *, Tensor(a!) out) -> Tensor(a!)
+  kernels:
+    - arg_meta: null
+      kernel_name: cadence::impl::HiFi::quantized_relu_per_tensor_out
+
 - func: cadence::quantized_linear.per_tensor_out(Tensor src, Tensor weight, Tensor bias, SymInt src_zero_point, SymInt weight_zero_point, SymInt out_multiplier, SymInt out_shift, SymInt out_zero_point, Tensor? offset, *, Tensor(a!) out) -> Tensor(a!)
   kernels:
     - arg_meta: null
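For reference, a yaml entry like the added cadence::quantized_relu.per_tensor_out binds its schema to a C++ kernel whose parameters mirror the schema arguments (Tensor becomes const Tensor&, int becomes int64_t, and the mutable out tensor comes last, after a runtime context). A declaration-only sketch with stand-in types; the actual signature and implementation live in op_quantized_relu_out.cpp further down:

// Stand-in types; the real kernel uses the executorch Tensor and kernel
// runtime context types (see op_quantized_relu_out.cpp below).
#include <cstdint>

struct Tensor {};
struct KernelRuntimeContext {};

// Mirrors: cadence::quantized_relu.per_tensor_out(Tensor X, int X_zero_point,
//   int out_zero_point, int out_multiplier, int out_shift, *, Tensor(a!) out)
void quantized_relu_per_tensor_out(
    KernelRuntimeContext& /*ctx*/,
    const Tensor& /*X*/,
    int64_t /*X_zero_point*/,
    int64_t /*out_zero_point*/,
    int64_t /*out_multiplier*/,
    int64_t /*out_shift*/,
    Tensor& /*out*/) {
  // Body omitted; the HiFi implementation is registered via kernel_name above.
}

int main() {}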

backends/cadence/hifi/operators/CMakeLists.txt

Lines changed: 2 additions & 2 deletions
@@ -76,8 +76,8 @@ target_include_directories(
 
 # Custom ops that are needed to run the test model.
 add_library(
-  custom_ops "quantized_linear_out.cpp" "quantized_layer_norm.cpp"
-  "quantize_per_tensor.cpp" "quantized_relu_out.cpp" "dequantize_per_tensor.cpp"
+  custom_ops "op_quantized_linear_out.cpp" "op_quantized_layer_norm.cpp"
+  "op_quantize_per_tensor.cpp" "op_quantized_relu_out.cpp" "op_dequantize_per_tensor.cpp"
 )
 target_include_directories(
   custom_ops PUBLIC ${ROOT_DIR}/.. ${CMAKE_BINARY_DIR}

backends/cadence/hifi/operators/op_clamp.cpp

Lines changed: 11 additions & 0 deletions
@@ -321,6 +321,17 @@ Tensor& clamp_Tensor_out(
 
   return out;
 }
+
+Tensor& clamp_tensor_out(
+    RuntimeContext& ctx,
+    const Tensor& in,
+    const executorch::aten::optional<Tensor>& min_opt,
+    const executorch::aten::optional<Tensor>& max_opt,
+    Tensor& out) {
+  return clamp_Tensor_out(ctx, in, min_opt, max_opt, out);
+}
+
+
 } // namespace native
 } // namespace HiFi
 } // namespace impl

backends/cadence/hifi/operators/op_mean.cpp

Lines changed: 10 additions & 0 deletions
@@ -168,6 +168,16 @@ Tensor& mean_out(
   return out;
 }
 
+Tensor& mean_dim_out(
+    RuntimeContext& ctx,
+    const Tensor& in,
+    optional<ArrayRef<int64_t>> dim_list,
+    bool keepdim,
+    optional<ScalarType> dtype,
+    Tensor& out) {
+  return mean_out(ctx, in, dim_list, keepdim, dtype, out);
+}
+
 } // namespace native
 } // namespace HiFi
 } // namespace impl

backends/cadence/hifi/operators/op_quantized_relu_out.cpp

Lines changed: 40 additions & 0 deletions
@@ -75,6 +75,46 @@ void quantized_relu_per_tensor_out(
 }
 }
 
+void quantized_relu_per_tensor_out(
+    KernelRuntimeContext& ctx,
+    const Tensor& input,
+    const Tensor& in_zero_point,
+    const int64_t out_zero_point,
+    const Tensor& out_multiplier,
+    const Tensor& out_shift,
+    Tensor& output) {
+  int8_t _in_zero_point = in_zero_point.const_data_ptr<int8_t>()[0];
+  int32_t _out_multiplier = out_multiplier.const_data_ptr<int32_t>()[0];
+  int32_t _out_shift = out_shift.const_data_ptr<int32_t>()[0];
+
+  quantized_relu_per_tensor_out(
+      ctx,
+      input,
+      _in_zero_point,
+      out_zero_point,
+      _out_multiplier,
+      _out_shift,
+      output);
+}
+
+void quantized_relu_out(
+    KernelRuntimeContext& ctx,
+    const Tensor& input,
+    const int64_t in_zero_point,
+    const int64_t out_zero_point,
+    const int64_t out_multiplier,
+    const int64_t out_shift,
+    Tensor& output) {
+  quantized_relu_per_tensor_out(
+      ctx,
+      input,
+      in_zero_point,
+      out_zero_point,
+      out_multiplier,
+      out_shift,
+      output);
+}
+
 } // namespace native
 } // namespace HiFi
 } // namespace impl

backends/cadence/hifi/operators/op_softmax.cpp

Lines changed: 9 additions & 0 deletions
@@ -194,6 +194,15 @@ Tensor& _softmax_out(
   return out;
 }
 
+Tensor& softmax_out(
+    KernelRuntimeContext& ctx,
+    const Tensor& in,
+    int64_t dim,
+    bool half_to_float,
+    Tensor& out) {
+  return _softmax_out(ctx, in, dim, half_to_float, out);
+}
+
 } // namespace native
 } // namespace HiFi
 } // namespace impl
