[clang-tidy] NO.17 performance-for-range-copy (#57809)
* fix

* fix
enkilee authored Oct 12, 2023
1 parent 5c13f7b commit 81eb71b
Showing 58 changed files with 124 additions and 124 deletions.
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -189,7 +189,7 @@ modernize-use-override,
 modernize-use-transparent-functors,
 -modernize-use-uncaught-exceptions,
 performance-faster-string-find,
--performance-for-range-copy,
+performance-for-range-copy,
 -performance-implicit-conversion-in-loop,
 -performance-inefficient-algorithm,
 performance-inefficient-string-concatenation,
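For context: in a .clang-tidy Checks list a name prefixed with - is disabled, so the hunk above removes the disabled entry and enables performance-for-range-copy. The check fires when the loop variable of a range-based for is copy-constructed from each element but only ever read; the fix used throughout this commit is to bind by const reference. A minimal sketch of the before/after pattern (illustrative names, not code from this commit):

#include <iostream>
#include <string>
#include <vector>

void PrintNames(const std::vector<std::string>& names) {
  // Before: copy-constructs a std::string on every iteration;
  // clang-tidy reports performance-for-range-copy here.
  for (std::string name : names) {
    std::cout << name << "\n";
  }
  // After: binds to the stored element, no per-iteration copy.
  for (std::string const& name : names) {
    std::cout << name << "\n";
  }
}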
4 changes: 2 additions & 2 deletions paddle/fluid/distributed/auto_parallel/dist_attr.cc
@@ -83,7 +83,7 @@ OperatorDistAttr& OperatorDistAttr::operator=(
 
 void OperatorDistAttr::initialize(const OpDesc* op) {
   if (op == nullptr) return;
-  for (std::string name : op->InputArgumentNames()) {
+  for (std::string const& name : op->InputArgumentNames()) {
     VarDesc* input = op->Block()->FindVarRecursive(name);
     VLOG(4) << "[OperatorDistAttr create input dist attr] " << name;
     if (input == nullptr || op->Type() == "create_py_reader") {
@@ -92,7 +92,7 @@ void OperatorDistAttr::initialize(const OpDesc* op) {
       input_dist_attrs_[name] = TensorDistAttr(get_tensor_shape(input));
     }
   }
-  for (std::string name : op->OutputArgumentNames()) {
+  for (std::string const& name : op->OutputArgumentNames()) {
     VarDesc* output = op->Block()->FindVarRecursive(name);
     VLOG(4) << "[OperatorDistAttr create output dist attr] " << name;
     if (output == nullptr) {
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/fleet_executor/carrier.cc
@@ -121,7 +121,7 @@ void Carrier::CopyParameters(
     const framework::ProgramDesc& program,
     const std::vector<std::string>& inference_root_scope_vars) {
   std::map<std::string, int> inference_root_scope_var_map;
-  for (auto var_name : inference_root_scope_vars) {
+  for (auto const& var_name : inference_root_scope_vars) {
     inference_root_scope_var_map.insert({var_name, 1});
   }
   for (size_t i = 0; i < program.Size(); ++i) {
@@ -85,7 +85,7 @@ InterceptorMessage ComputeInterceptor::PrepareVarsMsg() {
   ready_msg.set_message_type(DATA_WITH_VARS);
   ready_msg.set_scope_idx(cur_scope_id_);
   platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
-  for (auto iter : node_->vars_to_dtype()) {
+  for (auto const& iter : node_->vars_to_dtype()) {
     VarList* vars = ready_msg.add_vars_list();
     const auto& var_name = iter.first;
     vars->set_name(var_name);
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/fleet_executor/dist_model.cc
@@ -295,7 +295,7 @@ void DistModel::InsertCommOp(std::string tmp_var_name,
      << ". The ring id is: " << ring_id << ". The group has: " << nranks
      << " ranks. Current rank in the group is: " << rank
      << ". The endpoint is: " << endpoint << ". Peer endpoints are: ";
-  for (auto ep : peer_endpoints) {
+  for (const auto &ep : peer_endpoints) {
     ss << ep << ", ";
   }
   VLOG(3) << ss.str();
4 changes: 2 additions & 2 deletions paddle/fluid/distributed/fleet_executor/fleet_executor.cc
@@ -82,7 +82,7 @@ void PreventVarsDelete(
   for (const auto& pair : *unused_vars) {
     const framework::OperatorBase* op = pair.first;
     std::vector<std::string> cur_unused = pair.second;
-    for (auto name : vars_not_gc) {
+    for (auto const& name : vars_not_gc) {
      auto iter = std::find(cur_unused.begin(), cur_unused.end(), name);
      if (iter != cur_unused.end()) {
        VLOG(3) << "Removing var: [" << name
@@ -165,7 +165,7 @@ void FleetExecutor::Init(
     while_block_vars = GetUnusedVarsAfterWhile(
         program_desc, task_node, inference_root_scope_vars);
     VLOG(3) << "Vars will be gced after while op";
-    for (auto var : while_block_vars) {
+    for (auto const& var : while_block_vars) {
      VLOG(3) << var;
    }
    task_node->SetWhileBlockVars(while_block_vars);
22 changes: 11 additions & 11 deletions paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -410,7 +410,7 @@ static std::pair<std::string, std::string> GetAttrType(
       ret = "std::vector<std::string>";
       if (is_arg) ret += "&";
       val += "{";
-      for (auto x : PADDLE_GET_CONST(std::vector<std::string>, attr)) {
+      for (auto const& x : PADDLE_GET_CONST(std::vector<std::string>, attr)) {
         val += "\"" + x + "\"" + ",";
       }
       if (val.size() > 1) val.pop_back();
@@ -1238,7 +1238,7 @@ static std::string GenerateGradNodeCreationContent(
       bool found_target_name = false;
       for (const auto& iter : op_base_infos) {
         const auto& grad_outs_slot_map = iter.GetGradOutsSlotnameMap();
-        for (auto iter : grad_outs_slot_map) {
+        for (auto const& iter : grad_outs_slot_map) {
           if ((!found_target_name) && (input_name == iter.second)) {
             const char* SET_GRAD_OUT_META_TEMPLATE =
                 " grad_node->SetGradOutMeta(%s, %d);\n";
@@ -1256,7 +1256,7 @@ static std::string GenerateGradNodeCreationContent(
       bool found_target_name = false;
       for (const auto& iter : op_base_infos) {
         const auto& grad_outs_slot_map = iter.GetGradOutsSlotnameMap();
-        for (auto iter : grad_outs_slot_map) {
+        for (auto const& iter : grad_outs_slot_map) {
           if ((!found_target_name) && (input_name == iter.second)) {
             const char* SET_GRAD_OUT_META_TEMPLATE =
                 " grad_node->SetGradOutMeta(%s, %d);\n";
@@ -2142,7 +2142,7 @@ static std::string GenerateSingleOpBase(
   // [Generation] Get Full Zero
   std::string fill_zero_str = "";
   if (ops_to_fill_zero_for_empty_grads.count(fwd_op_type)) {
-    for (auto iter : grad_ins) {
+    for (auto const& iter : grad_ins) {
      const std::string& grad_input_name = iter.first;
      if (grad_ins_grad_slotname_map.count(grad_input_name)) {
        size_t fwd_output_position = fwd_outputs_name_pos_map.at(
@@ -2189,7 +2189,7 @@ static std::string GenerateSingleOpBase(
       "backward_inplace_tensor" + std::to_string(*outs_size);
   bool process_backward_inplace = false;
   std::string ins_contents_str = "";
-  for (auto iter : grad_ins) {
+  for (auto const& iter : grad_ins) {
     const std::string& grad_input_name = iter.first;
 
     if (grad_ins_fwd_slotname_map.count(grad_input_name)) {
@@ -2293,7 +2293,7 @@ static std::string GenerateSingleOpBase(
       paddle::string::Sprintf(BWD_INS_MAP_TEMPLATE, ins_name, ins_contents_str);
   generated_grad_function_body += ins_map_str;
 
-  for (auto iter : grad_ins) {
+  for (auto const& iter : grad_ins) {
     const std::string& grad_input_name = iter.first;
 
     if (grad_ins_fwd_slotname_map.count(grad_input_name)) {
@@ -2335,7 +2335,7 @@ static std::string GenerateSingleOpBase(
   VLOG(6) << "Generated Ins Map";
   // [Generation] Get Outs Map
   std::string outs_contents_str = "";
-  for (auto iter : grad_outs) {
+  for (auto const& iter : grad_outs) {
     const std::string& grad_output_name = iter.first;
 
     if (grad_outs_slotname_map.count(grad_output_name)) {
@@ -2440,7 +2440,7 @@ static std::string GenerateSingleOpBase(
   generated_grad_function_body += outs_map_str;
   generated_grad_function_body += outs_contents_str;
   generated_grad_function_body += "\n";
-  for (auto iter : grad_outs) {
+  for (auto const& iter : grad_outs) {
     const std::string& grad_output_name = iter.first;
 
     if (grad_outs_slotname_map.count(grad_output_name)) {
@@ -2498,7 +2498,7 @@ static std::string GenerateSingleOpBase(
       "%s[\"%s\"][0]);\n"
       " };\n";
   std::string backward_inplace_map_str = "";
-  for (auto iter : backward_inplace_map) {
+  for (auto const& iter : backward_inplace_map) {
     std::string backward_inplace_input_name = iter.first;
     std::string backward_inplace_output_name = iter.second;
     backward_inplace_map_str += paddle::string::Sprintf(
@@ -2553,7 +2553,7 @@ static std::string GenerateSingleOpBase(
   // [Generation] Get Return
   std::string outputs_str = "";
   size_t num_appended_outputs = 0;
-  for (auto iter : grad_outs) {
+  for (auto const& iter : grad_outs) {
     const std::string& grad_out_name = iter.first;
     const std::string& fwd_name = grad_outs_slotname_map.at(grad_out_name);
 
@@ -2594,7 +2594,7 @@ static std::string GenerateSingleOpBase(
 
   /* Handle Special Case: "PullSparseOp", etc
     For returns, append "GradOut" to the very end of return list. */
-  for (auto iter : grad_outs) {
+  for (auto const& iter : grad_outs) {
     const std::string& grad_out_name = iter.first;
     const std::string& fwd_name = grad_outs_slotname_map.at(grad_out_name);
 
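Many of the eager_generator loops above iterate over maps (grad_ins, grad_outs, and the slot-name maps). For a map, the element type is std::pair<const Key, Value>, so `for (auto iter : m)` copies both the key and the value on every iteration. A sketch of the cost, assuming a map shape like the ones these loops appear to use (the alias below is illustrative, not Paddle's actual type):

#include <map>
#include <string>
#include <vector>

using SlotMap = std::map<std::string, std::vector<std::string>>;

void Walk(const SlotMap& grad_ins) {
  // Copies a std::pair<const std::string, std::vector<std::string>>
  // per iteration: the key string and the whole vector are duplicated.
  for (auto iter : grad_ins) {
    const std::string& name = iter.first;
    (void)name;
  }
  // Binds to the stored pair; nothing is copied.
  for (auto const& iter : grad_ins) {
    const std::string& name = iter.first;
    (void)name;
  }
}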
2 changes: 1 addition & 1 deletion paddle/fluid/eager/backward.cc
@@ -85,7 +85,7 @@ void EnforceGradNodeHasInput(GradNodeBase* node) {
 void DuplicateCheck(const std::vector<paddle::Tensor>& inputs, bool is_input) {
   std::unordered_set<AutogradMeta*> visisted_ins;
   std::string msg = is_input ? "inputs" : "outputs";
-  for (auto in : inputs) {
+  for (auto const& in : inputs) {
     AutogradMeta* auto_grad_meta = EagerUtils::unsafe_autograd_meta(in);
     PADDLE_ENFORCE_EQ(
         visisted_ins.count(auto_grad_meta),
8 changes: 4 additions & 4 deletions paddle/fluid/eager/custom_operator/custom_operator_node.cc
@@ -190,12 +190,12 @@ RunCustomOpNode::operator()(paddle::small_vector<std::vector<paddle::Tensor>,
     }
   }
 
-  for (auto it : fwd_outs) {
+  for (auto it : fwd_outs) { // NOLINT
     VLOG(7) << "Insert fwd_outs to grad_inputs: " << it.first;
     tmp_ins[it.first] = RunCustomOpNode::Recover(&(it.second));
   }
 
-  for (auto it : fwd_ins) {
+  for (auto it : fwd_ins) { // NOLINT
     // NOTE(HongyuJia): returned tensor maybe un-defined tensor when inputs
     // optional<Tensor>
     VLOG(7) << "Insert fwd_ins to grad_inputs: " << it.first;
@@ -406,12 +406,12 @@ RunCustomOpDoubleGradNode::operator()(
     }
   }
 
-  for (auto it : fwd_outs) {
+  for (auto it : fwd_outs) { // NOLINT
     VLOG(7) << "Insert fwd_outs to grad_inputs: " << it.first;
     tmp_ins[it.first] = RunCustomOpDoubleGradNode::Recover(&(it.second));
   }
 
-  for (auto it : fwd_ins) {
+  for (auto it : fwd_ins) { // NOLINT
     VLOG(7) << "Insert fwd_ins to grad_inputs: " << it.first;
     tmp_ins[it.first] = RunCustomOpDoubleGradNode::Recover(&(it.second));
   }
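Not every flagged loop can take a reference: the four loops above pass &(it.second) to Recover(), which needs a mutable operand, so the per-iteration copy is kept deliberately and the warning is suppressed with // NOLINT instead. A hedged sketch of the same trade-off (the struct, Recover stand-in, and map types below are illustrative, not Paddle's real signatures):

#include <map>
#include <string>
#include <utility>
#include <vector>

struct SavedTensors { std::vector<int> slots; };

// Stand-in for RunCustomOpNode::Recover: mutates its argument.
std::vector<int> Recover(SavedTensors* saved) {
  return std::move(saved->slots);  // consumes the saved state
}

void Rebuild(const std::map<std::string, SavedTensors>& fwd_outs,
             std::map<std::string, std::vector<int>>& tmp_ins) {
  // const& would not compile (Recover needs a SavedTensors*), and a
  // mutable reference would consume the cached originals, so each
  // iteration works on a throwaway copy -- hence the NOLINT.
  for (auto it : fwd_outs) { // NOLINT
    tmp_ins[it.first] = Recover(&(it.second));
  }
}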
4 changes: 2 additions & 2 deletions paddle/fluid/framework/data_set.cc
@@ -138,7 +138,7 @@ std::vector<std::string> DatasetImpl<T>::GetSlots() {
     }
   }
   std::cout << "dataset use slots: ";
-  for (auto s : use_slots_) {
+  for (auto const& s : use_slots_) {
     std::cout << s << " | ";
   }
   std::cout << " end " << std::endl;
@@ -216,7 +216,7 @@ template <typename T>
 std::vector<paddle::framework::DataFeed*> DatasetImpl<T>::GetReaders() {
   std::vector<paddle::framework::DataFeed*> ret;
   ret.reserve(readers_.size());
-  for (auto i : readers_) {
+  for (auto const& i : readers_) {
     ret.push_back(i.get());
   }
   return ret;
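The GetReaders() loop is a case where the copy is not a string but a smart pointer: copying a std::shared_ptr does an atomic reference-count increment and decrement per iteration. Assuming readers_ holds shared_ptrs (suggested by the i.get() call and the raw-pointer return type, but an assumption here), the const& form sidesteps that traffic:

#include <memory>
#include <vector>

struct DataFeed {};

std::vector<DataFeed*> GetRawPointers(
    const std::vector<std::shared_ptr<DataFeed>>& readers) {
  std::vector<DataFeed*> ret;
  ret.reserve(readers.size());
  // const& binds to the stored shared_ptr: no atomic ref-count
  // bump per element, unlike `for (auto i : readers)`.
  for (auto const& i : readers) {
    ret.push_back(i.get());
  }
  return ret;
}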
12 changes: 6 additions & 6 deletions paddle/fluid/framework/downpour_worker.cc
@@ -135,7 +135,7 @@ void DownpourWorker::CollectLabelInfo(size_t table_idx) {
           static_cast<int>(table_idx)));
 
   TableParameter table;
-  for (auto i : param_.sparse_table()) {
+  for (auto const& i : param_.sparse_table()) {
     if (i.table_id() == table_id) {
       table = i;
       break;
@@ -191,7 +191,7 @@ void DownpourWorker::FillSparseValue(size_t table_idx) {
           static_cast<int>(table_idx)));
 
   TableParameter table;
-  for (auto i : param_.sparse_table()) {
+  for (auto const& i : param_.sparse_table()) {
     if (i.table_id() == table_id) {
       table = i;
       break;
@@ -513,7 +513,7 @@ void DownpourWorker::TrainFilesWithProfiler() {
       uint64_t tid = static_cast<uint64_t>(
           param_.program_config(0).pull_sparse_table_id(i));
       TableParameter table;
-      for (auto j : param_.sparse_table()) {
+      for (auto const& j : param_.sparse_table()) {
        if (j.table_id() == tid) {
          table = j;
          break;
@@ -599,7 +599,7 @@ void DownpourWorker::TrainFilesWithProfiler() {
       uint64_t tid = static_cast<uint64_t>(
           param_.program_config(0).push_sparse_table_id(i));
       TableParameter table;
-      for (auto i : param_.sparse_table()) {
+      for (auto const& i : param_.sparse_table()) {
        if (i.table_id() == tid) {
          table = i;
          break;
@@ -819,7 +819,7 @@ void DownpourWorker::TrainFiles() {
       uint64_t tid = static_cast<uint64_t>(
           param_.program_config(0).pull_sparse_table_id(i));
       TableParameter table;
-      for (auto j : param_.sparse_table()) {
+      for (auto const& j : param_.sparse_table()) {
        if (j.table_id() == tid) {
          table = j;
          break;
@@ -936,7 +936,7 @@ void DownpourWorker::TrainFiles() {
       uint64_t tid = static_cast<uint64_t>(
           param_.program_config(0).push_sparse_table_id(i));
       TableParameter table;
-      for (auto i : param_.sparse_table()) {
+      for (auto const& i : param_.sparse_table()) {
        if (i.table_id() == tid) {
          table = i;
          break;
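The six hunks above share one shape: scan param_.sparse_table() for a matching table id, then keep the match. Iterating by value copied every candidate message on the way to the match; with const&, the single `table = i` assignment becomes the only copy. A stand-in sketch of that find-then-copy-once pattern (a plain struct and vector in place of the protobuf message and repeated field):

#include <cstdint>
#include <vector>

struct TableParameter {
  uint64_t id = 0;
  // ... the real proto message carries many more fields ...
  uint64_t table_id() const { return id; }
};

TableParameter FindTable(const std::vector<TableParameter>& tables,
                         uint64_t tid) {
  TableParameter table;
  // const& scans without copying; the one intentional copy happens
  // only on the match, via the assignment below.
  for (auto const& j : tables) {
    if (j.table_id() == tid) {
      table = j;
      break;
    }
  }
  return table;
}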
8 changes: 4 additions & 4 deletions paddle/fluid/framework/downpour_worker_opt.cc
@@ -262,7 +262,7 @@ void DownpourWorkerOpt::CreateThreadOperatorsWithRerank(
   uint64_t tid =
       static_cast<uint64_t>(param_.program_config(0).pull_sparse_table_id(i));
   TableParameter table;
-  for (auto j : param_.sparse_table()) {
+  for (auto const& j : param_.sparse_table()) {
     if (j.table_id() == tid) {
       table = j;
       break;
@@ -315,7 +315,7 @@ void DownpourWorkerOpt::TrainFiles() {
   uint64_t tid =
       static_cast<uint64_t>(param_.program_config(0).pull_sparse_table_id(i));
   TableParameter table;
-  for (auto j : param_.sparse_table()) {
+  for (auto const& j : param_.sparse_table()) {
     if (j.table_id() == tid) {
       table = j;
       break;
@@ -344,7 +344,7 @@ void DownpourWorkerOpt::TrainFiles() {
   uint64_t tid = static_cast<uint64_t>(
       param_.program_config(0).pull_sparse_table_id(i));
   TableParameter table;
-  for (auto j : param_.sparse_table()) {
+  for (auto const& j : param_.sparse_table()) {
     if (j.table_id() == tid) {
       table = j;
       break;
@@ -455,7 +455,7 @@ void DownpourWorkerOpt::TrainFiles() {
   uint64_t tid = static_cast<uint64_t>(
       param_.program_config(0).push_sparse_table_id(i));
   TableParameter table;
-  for (auto i : param_.sparse_table()) {
+  for (auto const& i : param_.sparse_table()) {
     if (i.table_id() == tid) {
       table = i;
       break;
2 changes: 1 addition & 1 deletion paddle/fluid/framework/hogwild_worker.cc
@@ -126,7 +126,7 @@ void HogwildWorker::SetZero(phi::DenseTensor *tensor,
 void HogwildWorker::BindingDataFeedMemory() {
   const std::vector<std::string> &input_feed =
       device_reader_->GetUseSlotAlias();
-  for (auto name : input_feed) {
+  for (auto const &name : input_feed) {
     device_reader_->AddFeedVar(thread_scope_->FindVar(name), name);
   }
 }
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/add_support_int8_pass.cc
@@ -61,8 +61,8 @@ void AddSupportInt8Pass::ApplyImpl(ir::Graph* graph) const {
         // scale for one output
         for (auto out_node : quant_op->outputs) {
           for (auto out_op_node : out_node->outputs) {
-            for (auto name : out_op_node->Op()->InputNames()) {
-              for (auto input_name : out_op_node->Op()->Input(name)) {
+            for (auto const& name : out_op_node->Op()->InputNames()) {
+              for (auto const& input_name : out_op_node->Op()->Input(name)) {
                 if (out_op_node->Op()->HasAttr("Input_scale_" + input_name)) {
                   for (size_t i = 0; i < quanted_op_desc->OutputNames().size();
                        i++) {
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc
@@ -189,7 +189,7 @@ class CoalesceGradTensorPass : public ir::Pass {
       const {
     if (params_grads.empty()) return true;
     auto dtype = GetDtypeOfVar(vars_info, params_grads.front().second);
-    for (auto p_g : params_grads) {
+    for (auto const &p_g : params_grads) {
      auto next_dtype = GetDtypeOfVar(vars_info, p_g.second);
      if (next_dtype != dtype) {
        return false;
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/constant_folding_pass.cc
@@ -93,7 +93,7 @@ void ConstantFoldingPass::ApplyImpl(ir::Graph *graph) const {
       map[out_node->Name()] = 0;
     }
     // Forbid other node in graph having the same name with nodes in map
-    for (auto iter : map) {
+    for (auto const &iter : map) {
      for (auto node : graph->Nodes()) {
        if (node->IsVar() && node->Name() == iter.first) {
          map[node->Name()]++;
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/delete_cast_op_pass.cc
@@ -84,7 +84,7 @@ static std::vector<Node*> FindOpNodeWithInputName(
     if (!node->IsOp()) continue;
     auto inputs = node->Op()->Inputs();
     bool find_input = false;
-    for (auto input : inputs) {
+    for (auto const& input : inputs) {
      auto input_names = input.second;
      if (std::count(input_names.begin(), input_names.end(), input_name) > 0) {
        find_input = true;
@@ -103,7 +103,7 @@ static std::vector<Node*> FindOpNodeWithOutputName(
     if (!node->IsOp()) continue;
     auto outputs = node->Op()->Outputs();
     bool find_output = false;
-    for (auto output : outputs) {
+    for (auto const& output : outputs) {
      auto output_names = output.second;
      if (std::count(output_names.begin(), output_names.end(), output_name) >
          0) {
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/delete_repeated_ops_pass.cc
@@ -36,7 +36,7 @@ namespace ir {
 bool HasOutVarName(Node* op_node, std::string name) {
   auto* op_desc = op_node->Op();
   auto outputs = op_desc->Outputs();
-  for (auto iter : outputs) {
+  for (auto const& iter : outputs) {
     auto out_names = iter.second;
     if (std::count(out_names.begin(), out_names.end(), name) > 0) {
       return true;
@@ -155,7 +155,7 @@ void DeleteRepeatedOpsPass::DeleteRepeatedOps(
     }
   }
 
-  for (auto iter : ops_map) {
+  for (auto const& iter : ops_map) {
     auto ops = iter.second;
     auto* first_op_out = ops[0]->outputs[0];
     auto first_op_out_name = first_op_out->Name();