Skip to content

Commit

Permalink
[core] Fix Coverity issues in core operators (openvinotoolkit#25178)
Browse files Browse the repository at this point in the history
### Details:
 - Fix issues reported by Coverity in core operators.
   Use move or reference to avoid unnecessary object copies.

### Tickets:
- Part of
[CVS-141674](https://jira.devtools.intel.com/browse/CVS-141674)
  • Loading branch information
praasz authored Jun 24, 2024
1 parent 8a0d233 commit 4fac242
Show file tree
Hide file tree
Showing 18 changed files with 41 additions and 47 deletions.
2 changes: 1 addition & 1 deletion src/core/include/openvino/op/roi_align_rotated.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ class OPENVINO_API ROIAlignRotated : public util::ROIAlignBase {
}

private:
bool m_clockwise_mode;
bool m_clockwise_mode{};
};
} // namespace v15
} // namespace op
Expand Down
10 changes: 5 additions & 5 deletions src/core/src/op/assign.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ void Assign::validate_and_infer_types() {
for (const auto& input : inputs()) {
start_nodes.push_back(input.get_source_output().get_node_shared_ptr());
}
auto nodes = topological_sort(start_nodes);
auto nodes = topological_sort(std::move(start_nodes));
for (const auto& node : nodes) {
if (auto read_value = ov::as_type_ptr<v3::ReadValue>(node)) {
if (read_value->get_variable_id() == m_variable_id)
Expand Down Expand Up @@ -109,15 +109,15 @@ bool Assign::evaluate(TensorVector& outputs,

auto& variable_context = const_cast<util::VariableContext&>(found_context->second.as<util::VariableContext>());

const auto& variable_values = variable_context.get_variable_values();
auto var_value = variable_context.get_variable_value(m_variable);

// automatically allocate memory if not provided by user
if (variable_values.find(m_variable) == variable_values.end()) {
if (!var_value) {
auto tensor = Tensor(m_variable->get_info().data_type, m_variable->get_info().data_shape.to_shape());
variable_context.set_variable_value(m_variable, std::make_shared<util::VariableValue>(tensor));
var_value = std::make_shared<util::VariableValue>(tensor);
variable_context.set_variable_value(m_variable, var_value);
}

const auto var_value = variable_values.find(m_variable)->second;
var_value->set_reset(false);
auto memory_buffer = var_value->get_state();
memory_buffer.set_shape(inputs[0].get_shape());
Expand Down
4 changes: 2 additions & 2 deletions src/core/src/op/concat.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -39,9 +39,9 @@ void Concat::validate_and_infer_types() {
input_shapes.push_back(get_input_partial_shape(i));
}

const auto output_shape = shape_infer(this, input_shapes).front();
const auto output_shapes = shape_infer(this, input_shapes);

set_output_type(0, inputs_et, output_shape);
set_output_type(0, inputs_et, output_shapes[0]);
}

std::shared_ptr<Node> Concat::clone_with_new_inputs(const OutputVector& new_args) const {
Expand Down
4 changes: 2 additions & 2 deletions src/core/src/op/convert.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -162,8 +162,8 @@ bool evaluate_bound(const Node* const node, TensorVector& output_values, const T
return false;

// dynamic values translation
auto input_dynamic_mask = Tensor(element::boolean, in_bound_shape);
auto outputs = TensorVector{input_dynamic_mask};
auto outputs = TensorVector{{element::boolean, in_bound_shape}};
const auto& input_dynamic_mask = outputs[0];

return v1::Equal().evaluate(outputs, {input_bound, input_max}) &&
v1::Select().evaluate(output_values, {input_dynamic_mask, output_max, output_values[0]});
Expand Down
4 changes: 2 additions & 2 deletions src/core/src/op/eye.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,8 @@ void Eye::validate_and_infer_types() {
input_et);
}

const auto output_shape = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)).front();
set_output_type(0, get_out_type(), output_shape);
const auto output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this));
set_output_type(0, get_out_type(), output_shapes[0]);
}

bool Eye::visit_attributes(ov::AttributeVisitor& visitor) {
Expand Down
6 changes: 3 additions & 3 deletions src/core/src/op/if.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -40,9 +40,9 @@ static ov::PartialShape resolve_shape(const ov::PartialShape& then_pshape, const
return ov::PartialShape::dynamic();
}
}
std::vector<ov::Dimension> new_dims;
ov::PartialShape new_dims;

// If rangs are equal each dimesion of then_body output is union with each dimension of
// If ranges are equal each dimension of then_body output is union with each dimension of
// else_body
for (auto then_it = then_pshape.cbegin(), else_it = else_pshape.cbegin(); then_it != then_pshape.cend();
then_it++, else_it++) {
Expand All @@ -57,7 +57,7 @@ static ov::PartialShape resolve_shape(const ov::PartialShape& then_pshape, const
}
}

return ov::PartialShape(new_dims);
return new_dims;
}

bool ov::op::v8::If::visit_attributes(AttributeVisitor& visitor) {
Expand Down
4 changes: 2 additions & 2 deletions src/core/src/op/loop.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -179,8 +179,8 @@ void Loop::validate_and_infer_types() {
as_type_ptr<op::v0::TensorIterator::InvariantInputDescription>(input_description)) {
auto body_parameter = m_bodies[0]->get_parameters().at(invariant_input_description->m_body_parameter_index);

auto input_partial_shape = input(index).get_partial_shape();
auto input_type = input(index).get_element_type();
const auto& input_partial_shape = input(index).get_partial_shape();
const auto& input_type = input(index).get_element_type();

body_parameter->set_partial_shape(input_partial_shape);
body_parameter->set_element_type(input_type);
Expand Down
4 changes: 2 additions & 2 deletions src/core/src/op/max_pool.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -204,9 +204,9 @@ bool MaxPool::evaluate(TensorVector& outputs, const TensorVector& inputs) const
const auto input_shapes = std::vector<PartialShape>{inputs[0].get_shape()};
auto pads_begin = m_pads_begin;
auto pads_end = m_pads_end;
const auto output_shape = shape_infer(this, input_shapes, pads_begin, pads_end).front();
const auto output_shapes = shape_infer(this, input_shapes, pads_begin, pads_end);

outputs[0].set_shape(output_shape.get_shape());
outputs[0].set_shape(output_shapes[0].get_shape());
using namespace ov::element;
return IF_TYPE_OF_CONVERT_TENSORS(v1_MaxPool_evaluate,
this,
Expand Down
8 changes: 4 additions & 4 deletions src/core/src/op/mod.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -110,8 +110,8 @@ Tensor evaluate_undefined_result_mask(const TensorVector& bounds) {

const auto& in_et = bounds.front().get_element_type();

auto zero_t = ov::util::make_tensor_of_value(in_et, 0);
auto max_t = ov::util::make_tensor_of_max_value(in_et);
const auto zero_t = ov::util::make_tensor_of_value(in_et, 0);
const auto max_t = ov::util::make_tensor_of_max_value(in_et);

const auto& v_ub = bounds[1];
const auto& m_lb = bounds[2];
Expand Down Expand Up @@ -159,7 +159,7 @@ TensorVector get_bounds_with_valid_values(const TensorVector& bounds, const Tens

auto m_bounds = TensorVector();
m_bounds.reserve(bounds.size());
std::transform(bounds.cbegin(), bounds.cend(), std::back_inserter(m_bounds), [&](const Tensor& b) {
std::transform(bounds.cbegin(), bounds.cend(), std::back_inserter(m_bounds), [&](const Tensor& b) -> ov::Tensor {
auto tmp = TensorVector{{b.get_element_type(), mask.get_shape()}};
return select_op.evaluate(tmp, {mask, one_t, b}) ? tmp.front() : Tensor{};
});
Expand Down Expand Up @@ -205,7 +205,7 @@ bool evaluate_bound(const Node* const op, TensorVector& outputs, bool is_lower)
}
// Set undefined bound value for results which cannot be calculated.
const auto select_op = v1::Select();
const auto undefined_bound =
const auto& undefined_bound =
is_lower ? ov::util::make_tensor_of_value(in_et, 0) : ov::util::make_tensor_of_max_value(in_et);
return select_op.evaluate(outputs, {undefined_result_mask, undefined_bound, outputs.front()});
} else {
Expand Down
12 changes: 4 additions & 8 deletions src/core/src/op/non_zero.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -80,22 +80,18 @@ void NonZero::validate_and_infer_types() {
if (input_shape.rank().compatible(0)) {
set_output_type(0, m_output_type, PartialShape::dynamic(2));
} else {
auto dim = Dimension{0, 1};
auto output_shape = PartialShape{input_shape.rank(), {0, 1}};
auto& dim = output_shape[1];
for (auto&& d : input_shape)
dim *= d;
set_output_type(0, m_output_type, PartialShape{input_shape.rank(), dim});
set_output_type(0, m_output_type, output_shape);
}

set_input_is_relevant_to_shape(0);

if (const auto input_constant = ov::util::get_constant_from_source(input_value(0))) {
// input_value is available to calculate output shape

// const_cast of Constant data is needed to avoid obsolete copy of this data into the Tensor.
// It's safe here as evaluate() method doesn't modify input Tensors.
const auto inputs = TensorVector{{input_constant->get_element_type(),
input_constant->get_shape(),
const_cast<void*>(input_constant->get_data_ptr())}};
const auto inputs = TensorVector{input_constant->get_tensor_view()};
auto outputs = TensorVector{{m_output_type, {}}};
if (!evaluate(outputs, inputs))
return;
Expand Down
2 changes: 1 addition & 1 deletion src/core/src/op/parameter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ bool AttributeAdapter<ParameterVector>::visit_attributes(AttributeVisitor& visit
}
visitor.on_attribute(index.str(), id);
if (!m_ref[i]) {
m_ref[i] = ov::as_type_ptr<op::v0::Parameter>(visitor.get_registered_node(id));
m_ref[i] = ov::as_type_ptr<op::v0::Parameter>(visitor.get_registered_node(std::move(id)));
}
}
return true;
Expand Down
2 changes: 1 addition & 1 deletion src/core/src/op/result.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ bool AttributeAdapter<ResultVector>::visit_attributes(AttributeVisitor& visitor)
}
visitor.on_attribute(index.str(), id);
if (!m_ref[i]) {
m_ref[i] = as_type_ptr<op::v0::Result>(visitor.get_registered_node(id));
m_ref[i] = as_type_ptr<op::v0::Result>(visitor.get_registered_node(std::move(id)));
}
}
return true;
Expand Down
2 changes: 1 addition & 1 deletion src/core/src/op/space_to_batch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ bool evaluate(TensorVector& outputs, const TensorVector& inputs) {
pads_begin_vec,
pads_end_vec,
op::PadMode::CONSTANT);
data_shape = padded_shape;
data_shape = std::move(padded_shape);

Shape dispersed_shape(block_values_size + 1);
std::vector<size_t> axes_order(block_values_size + 1);
Expand Down
5 changes: 3 additions & 2 deletions src/core/src/op/type_relaxed.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -56,9 +56,10 @@ void TypeRelaxedBase::restore_input_data_types(Node& node, const element::TypeVe
}
}

TemporaryReplaceOutputType::TemporaryReplaceOutputType(Output<Node> output, element::Type tmp_type) : m_output(output) {
TemporaryReplaceOutputType::TemporaryReplaceOutputType(Output<Node> output, element::Type tmp_type)
: m_output(std::move(output)),
orig_type(m_output.get_element_type()) {
// save original element type in order to restore it in the destructor
orig_type = m_output.get_element_type();
ov::descriptor::set_element_type(m_output.get_tensor(), tmp_type);
}

Expand Down
8 changes: 3 additions & 5 deletions src/core/src/op/unique.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -163,11 +163,9 @@ void op::v10::Unique::validate_and_infer_types() {
rev_idx_size = Dimension{dim_at_axis.get_max_length()};
}

auto output_shape = input_shape;
output_shape[normalized_axis] = output_dim_at_axis;
output_shapes[0] = output_shape;

output_shapes[2] = PartialShape{rev_idx_size};
output_shapes[0] = input_shape;
output_shapes[0][normalized_axis] = std::move(output_dim_at_axis);
output_shapes[2] = PartialShape{std::move(rev_idx_size)};
}
} else {
// no axis => flattened input tensor
Expand Down
7 changes: 3 additions & 4 deletions src/core/src/op/util/broadcast_base.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -197,17 +197,16 @@ void ov::op::util::BroadcastBase::validate_and_infer_types() {

if (!output_shape_defined && concat->get_output_partial_shape(0).is_static() &&
concat->get_shape().size() == 1 && concat_inputs.size() == shape_size(concat->get_shape())) {
auto output_partial_shape = std::vector<Dimension>{};
output_shape.resize(0);
for (const auto& concat_input : concat_inputs) {
auto source_node_ptr = concat_input.get_source_output().get_node_shared_ptr();
if (auto source_const_ptr = ov::as_type_ptr<ov::op::v0::Constant>(source_node_ptr)) {
output_partial_shape.emplace_back(source_const_ptr->get_axis_vector_val()[0]);
output_shape.emplace_back(source_const_ptr->get_axis_vector_val()[0]);
} else {
output_partial_shape.push_back(Dimension::dynamic());
output_shape.push_back(Dimension::dynamic());
}
}
output_shape_defined = true;
output_shape = PartialShape(output_partial_shape);
}
}

Expand Down
2 changes: 1 addition & 1 deletion src/core/src/op/util/framework_node.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ void ov::op::util::FrameworkNode::validate_and_infer_types() {
pshape = shape_map.at(output_index);
}
if (PartialShape::merge_into(pshape, node_result.get_partial_shape())) {
shape_map[output_index] = pshape;
shape_map[output_index] = std::move(pshape);
} else {
shape_map[output_index] = PartialShape::dynamic();
}
Expand Down
2 changes: 1 addition & 1 deletion src/core/src/op/util/index_reduction.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ void ov::op::util::IndexReduction::validate_and_infer_types() {
output_dims[i] = arg_shape[j++];
}

output_shape = PartialShape(output_dims);
output_shape = PartialShape(std::move(output_dims));
}

set_output_type(0, m_index_element_type, output_shape);
Expand Down

0 comments on commit 4fac242

Please sign in to comment.