diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_ie_scheduler.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_ie_scheduler.cpp
index c0449477b2185d..e3d202dfe73c70 100644
--- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_ie_scheduler.cpp
+++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_ie_scheduler.cpp
@@ -24,8 +24,7 @@ void ACLScheduler::set_num_threads(unsigned int num_threads) {}
 
 void ACLScheduler::schedule_custom(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors) {
     const Window & max_window = window;
-    const unsigned int num_iterations =
-        max_window.num_iterations(hints.split_dimension()) == 1 ? 1 : max_window.num_iterations_total();
+    const unsigned int num_iterations = max_window.num_iterations(hints.split_dimension());
     const auto _num_threads = std::min(num_iterations, static_cast<unsigned int>(parallel_get_num_threads()));
 
     if (num_iterations < 1) {
diff --git a/src/plugins/intel_cpu/src/nodes/rnn.cpp b/src/plugins/intel_cpu/src/nodes/rnn.cpp
index 74b65c2a88406d..eaeb4b4aba3f23 100644
--- a/src/plugins/intel_cpu/src/nodes/rnn.cpp
+++ b/src/plugins/intel_cpu/src/nodes/rnn.cpp
@@ -516,6 +516,10 @@ void RNN::configurePortDataTypes() {
         // onednn doesn't have fp16 instance
         inDataTypes[xIdx] = outDataTypes[yIdx] = outDataTypes[hoIdx] = inDataTypes[hIdx] = memory::data_type::f32; // required by oneDNN.
 
+    // oneDNN does not support fp16 precision for this layer
+    if (cell_type == dnnl::algorithm::vanilla_augru && inDataTypes[aIdx] == memory::data_type::f16)
+        inDataTypes[aIdx] = memory::data_type::f32;
+
     if (outDataTypes[yIdx] == memory::data_type::bf16 && one_of(inDataTypes[xIdx], memory::data_type::s8, memory::data_type::u8))
         outDataTypes[yIdx] = memory::data_type::f32; // oneDNN does not support bf16 output precision for quantized rnn primitive yet
 }