[MXNET-860] Use modernized ranged loops where possible (apache#12356)
* [MXNET-860] Use modernized ranged loops where possible

* [MXNET-860] Use uniform ref, use const where possible

* [MXNET-860] Fix build error after conflict resolve

* [MXNET-860] Apply const, improve naming
KellenSunderland authored and gigasquid committed Nov 19, 2018
1 parent 64657c2 commit 5a83b6b
Showing 31 changed files with 198 additions and 199 deletions.
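The pattern behind every hunk below is the same: an index- or iterator-based loop is replaced by a C++11 range-based for, binding the element as const auto& where it is only read, auto& where it is written back, and by value where it is a small trivially copyable type. A minimal, self-contained sketch of that before/after pattern (illustrative code only, not taken from the changed MXNet files; the vector contents are made up):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> names = {"conv0_weight", "conv0_bias", "fc1_weight"};

  // Before: index-based loop with repeated subscripting.
  for (std::size_t i = 0; i < names.size(); ++i) {
    std::cout << names[i] << '\n';
  }

  // After: range-based loop; const auto& avoids copying each element and
  // documents that the loop only reads it.
  for (const auto& name : names) {
    std::cout << name << '\n';
  }

  // When the loop modifies elements in place, bind a non-const reference.
  for (auto& name : names) {
    name += "_grad";
  }
  return 0;
}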
8 changes: 4 additions & 4 deletions src/c_api/c_api_symbolic.cc
@@ -269,8 +269,8 @@ int MXSymbolListAttr(SymbolHandle symbol,
   }
   *out_size = attr_list.size()/2;
   ret->ret_vec_charp.clear();
-  for (size_t i = 0; i < attr_list.size(); ++i) {
-    ret->ret_vec_charp.push_back(attr_list[i].c_str());
+  for (const auto& attr : attr_list) {
+    ret->ret_vec_charp.push_back(attr.c_str());
   }
   *out = dmlc::BeginPtr(ret->ret_vec_charp);
   API_END();
@@ -298,8 +298,8 @@ int MXSymbolListAttrShallow(SymbolHandle symbol,
   }
   *out_size = attr_list.size()/2;
   ret->ret_vec_charp.clear();
-  for (size_t i = 0; i < attr_list.size(); ++i) {
-    ret->ret_vec_charp.push_back(attr_list[i].c_str());
+  for (auto &attr : attr_list) {
+    ret->ret_vec_charp.push_back(attr.c_str());
   }
   *out = dmlc::BeginPtr(ret->ret_vec_charp);
   API_END();
14 changes: 7 additions & 7 deletions src/c_api/c_predict_api.cc
@@ -139,11 +139,11 @@ int _CreatePartialOut(const char* symbol_json_str,
   std::unordered_set<std::string> arg_names, aux_names;
   std::vector<std::string> arg_names_vec = sym.ListInputNames(Symbol::kReadOnlyArgs);
   std::vector<std::string> aux_names_vec = sym.ListInputNames(Symbol::kAuxiliaryStates);
-  for (size_t i = 0; i < arg_names_vec.size(); ++i) {
-    arg_names.insert(arg_names_vec[i]);
+  for (const auto &arg_name : arg_names_vec) {
+    arg_names.insert(arg_name);
   }
-  for (size_t i = 0; i < aux_names_vec.size(); ++i) {
-    aux_names.insert(aux_names_vec[i]);
+  for (const auto &aux_name : aux_names_vec) {
+    aux_names.insert(aux_name);
   }
   std::vector<NDArray> data;
   std::vector<std::string> names;
@@ -508,13 +508,13 @@ int MXNDListCreate(const char* nd_file_bytes,
     ret->keys.resize(arrays.size());
   }
   ret->indptr.push_back(0);
-  for (size_t i = 0; i < arrays.size(); ++i) {
-    TShape shape = arrays[i].shape();
+  for (auto &array : arrays) {
+    TShape shape = array.shape();
     size_t begin = ret->data.size();
     size_t size = shape.Size();
     ret->shapes.push_back(shape);
     ret->data.resize(begin + size);
-    arrays[i].SyncCopyToCPU(dmlc::BeginPtr(ret->data) + begin, size);
+    array.SyncCopyToCPU(dmlc::BeginPtr(ret->data) + begin, size);
     ret->indptr.push_back(begin + size);
   }
   *out = ret;
8 changes: 4 additions & 4 deletions src/common/exec_utils.h
@@ -591,14 +591,14 @@ inline nnvm::Graph AssignContext(nnvm::Graph g,
 
   g.attrs["device"] = std::make_shared<dmlc::any>(std::move(device));
   g = nnvm::pass::PlaceDevice(g, "__ctx_group__", device_map, "_CrossDeviceCopy");
-  const auto& assigned_device = g.GetAttr<nnvm::DeviceVector>("device");
+  const auto& assigned_devices = g.GetAttr<nnvm::DeviceVector>("device");
 
   exec::ContextVector vcontext;
-  for (size_t i = 0; i < assigned_device.size(); ++i) {
-    if (assigned_device[i] == -1) {
+  for (auto context : assigned_devices) {
+    if (context == -1) {
       vcontext.push_back(default_ctx);
     } else {
-      vcontext.push_back(ctx_list[assigned_device[i]]);
+      vcontext.push_back(ctx_list[context]);
     }
   }
 
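The exec_utils.h hunk above iterates the assigned devices by value (for (auto context : assigned_devices)); the comparison against -1 shows the elements are small integer device ids, so copying each one is cheap. For heavier element types a const auto& binding avoids a copy per iteration. A short sketch of that distinction, using made-up data (this is not MXNet code; device_ids and names are illustrative names):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main() {
  // Small, trivially copyable elements (such as integer device ids): taking
  // them by value is as cheap as a reference and reads more simply.
  std::vector<int> device_ids = {0, -1, 1};
  int unassigned = 0;
  for (int id : device_ids) {
    if (id == -1) ++unassigned;  // -1 marking "no device assigned", as in the hunk above
  }

  // Larger elements (strings, shapes, arrays): bind a const reference so each
  // iteration does not copy the element.
  std::vector<std::string> names = {"data", "softmax_label"};
  std::size_t total_length = 0;
  for (const auto& name : names) {
    total_length += name.size();
  }

  std::cout << unassigned << " unassigned, total name length " << total_length << '\n';
  return 0;
}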
6 changes: 2 additions & 4 deletions src/executor/attach_op_execs_pass.cc
@@ -58,12 +58,10 @@ class StorageFallbackOpExecutor : public OpExecutor {
     if (!init_) {
       pre_temp_buf_.clear();
       post_temp_buf_.clear();
-      for (size_t i = 0; i < in_array.size(); i++) {
-        auto &nd = in_array[i];
+      for (const auto& nd : in_array) {
         pre_temp_buf_.emplace_back(nd.shape(), nd.ctx(), true, nd.dtype());
       }
-      for (size_t i = 0; i < out_array.size(); i++) {
-        auto &nd = out_array[i];
+      for (const auto& nd : out_array) {
         post_temp_buf_.emplace_back(nd.shape(), nd.ctx(), true, nd.dtype());
       }
       init_ = true;
13 changes: 6 additions & 7 deletions src/executor/graph_executor.cc
@@ -1130,14 +1130,13 @@ void GraphExecutor::InitCachedOps() {
 
     // the variables
     std::vector<Engine::VarHandle> use_vars, mutate_vars;
-    for (size_t i = 0; i < exec->in_array.size(); ++i) {
-      auto& nd = exec->in_array[i];
+    for (const auto& nd : exec->in_array) {
       use_vars.push_back(nd.var());
     }
-    for (auto& r : exec->op_ctx.requested) {
+    for (const auto& r : exec->op_ctx.requested) {
       mutate_vars.push_back(r.var);
     }
-    for (auto& nd : exec->out_array) {
+    for (const auto& nd : exec->out_array) {
       mutate_vars.push_back(nd.var());
     }
     if (exec->var() != nullptr) {
@@ -1551,16 +1550,16 @@ static nnvm::Symbol PartitionGraph(const nnvm::Symbol& src,
   std::vector<Context> aux_state_ctxes(aux_states.size());
 
   size_t i1 = 0, i2 = 0;
-  for (size_t i = 0; i < input_names.size(); ++i) {
-    if (i2 < aux_names.size() && aux_names[i2] == input_names[i]) {
+  for (const auto& input_name : input_names) {
+    if (i2 < aux_names.size() && aux_names[i2] == input_name) {
       arg_shapes.push_back(aux_states[i2].shape());
       arg_dtypes.push_back(aux_states[i2].dtype());
       arg_stypes.push_back(aux_states[i2].storage_type());
       aux_state_ctxes[i2] = aux_states[i2].ctx();
       ++i2;
     } else {
       CHECK(i1 < arg_names.size());
-      CHECK_EQ(arg_names[i1], input_names[i]);
+      CHECK_EQ(arg_names[i1], input_name);
       arg_shapes.push_back(in_args->at(i1).shape());
       arg_dtypes.push_back(in_args->at(i1).dtype());
       arg_stypes.push_back(in_args->at(i1).storage_type());
31 changes: 18 additions & 13 deletions src/imperative/cached_op.cc
@@ -278,10 +278,10 @@ bool CachedOp::SetForwardGraph(
   shape_inputs.reserve(inputs.size());
   dtype_inputs.reserve(inputs.size());
   storage_type_inputs.reserve(inputs.size());
-  for (uint32_t i = 0; i < inputs.size(); ++i) {
-    shape_inputs.emplace_back(inputs[i]->shape());
-    dtype_inputs.emplace_back(inputs[i]->dtype());
-    storage_type_inputs.emplace_back(inputs[i]->storage_type());
+  for (auto input : inputs) {
+    shape_inputs.emplace_back(input->shape());
+    dtype_inputs.emplace_back(input->dtype());
+    storage_type_inputs.emplace_back(input->storage_type());
   }
 
   bool match = true;
@@ -658,9 +658,9 @@ void CachedOp::StaticRunOps(
       arg_dtypes.clear();
       arg_shapes.reserve(ndinputs.size());
       arg_dtypes.reserve(ndinputs.size());
-      for (size_t i = 0; i < ndinputs.size(); ++i) {
-        arg_shapes.emplace_back(ndinputs[i]->shape());
-        arg_dtypes.emplace_back(ndinputs[i]->dtype());
+      for (auto& ndinput : ndinputs) {
+        arg_shapes.emplace_back(ndinput->shape());
+        arg_dtypes.emplace_back(ndinput->dtype());
       }
       state.op_states[i] = createop[node.source->op()](
           node.source->attrs, default_ctx, arg_shapes, arg_dtypes);
@@ -784,7 +784,9 @@ OpStatePtr CachedOp::DynamicForward(
   states.reserve(idx.num_nodes());
   std::vector<NDArray*> arrays;
   arrays.reserve(buff.size());
-  for (size_t i = 0; i < buff.size(); ++i) arrays.push_back(&buff[i]);
+  for (auto& buffered_array : buff) {
+    arrays.push_back(&buffered_array);
+  }
   for (size_t i = 0; i < num_inputs; ++i) {
     arrays[idx.entry_id(idx.input_nodes()[i], 0)] = inputs[i];
   }
@@ -912,7 +914,9 @@ void CachedOp::DynamicBackward(
   buff.resize(idx.num_node_entries());
   std::vector<NDArray*> arrays;
   arrays.reserve(buff.size());
-  for (size_t i = 0; i < buff.size(); ++i) arrays.push_back(&buff[i]);
+  for (auto& buffered_array : buff) {
+    arrays.push_back(&buffered_array);
+  }
   for (size_t i = 0; i < inputs.size(); ++i) {
     if (runtime.info.bwd_input_eid[i] == kEidNotExist) {
       continue;
@@ -1187,8 +1191,9 @@ void CachedOpBackward(const OpStatePtr& state_ptr,
     in_ptrs.push_back(&(*it));
   }
   CHECK_EQ(in_ptrs.size(), s.op->num_backward_inputs());
-  for (size_t i = 0; i < out_bufs.size(); i++)
-    out_ptrs.push_back(&out_bufs[i]);
+  for (auto& out_buf : out_bufs) {
+    out_ptrs.push_back(&out_buf);
+  }
   CHECK_EQ(out_ptrs.size(), s.op->num_backward_outputs());
   // Set is_training correct for the imperative executor.
   bool orig_is_train;
@@ -1293,8 +1298,8 @@ void CachedOpParamParser(nnvm::NodeAttrs* attrs) {
     nnvm::Symbol sym;
     sym.outputs = g.outputs;
     std::vector<std::pair<std::string, std::string> > flags;
-    for (auto it = attrs->dict.begin(); it != attrs->dict.end(); it++)
-      flags.emplace_back(it->first, it->second);
+    for (const auto& attr : attrs->dict)
+      flags.emplace_back(attr.first, attr.second);
     attrs->parsed = CachedOpPtr(new CachedOp(sym, flags));
   }
 }
32 changes: 17 additions & 15 deletions src/imperative/imperative.cc
@@ -189,8 +189,8 @@ void Imperative::RecordOp(
                            std::vector<bool>* p_save_outputs) {
   MXAPIThreadLocalEntry *local_buff = MXAPIThreadLocalStore::Get();
 
-  for (uint32_t i = 0; i < outputs.size(); ++i) {
-    CHECK(AGInfo::IsNone(*(outputs[i])))
+  for (auto output : outputs) {
+    CHECK(AGInfo::IsNone(*output))
       << "Assigning to NDArrays that are already in a computational graph "
       << "will cause undefined behavior when evaluating gradients. "
      << "Please call backward first to clear the graph or do this out side of "
@@ -247,8 +247,8 @@ void Imperative::RecordOp(
     node->inputs[i] = inputs[i]->entry_;
   }
 
-  for (uint32_t i = 0; i < outputs.size(); ++i) {
-    CHECK(AGInfo::IsNone(*(outputs[i])))
+  for (auto output : outputs) {
+    CHECK(AGInfo::IsNone(*output))
       << "Inplace operations (+=, -=, x[:]=, etc) are not supported when "
       << "recording with autograd.";
   }
@@ -348,7 +348,7 @@ std::vector<NDArray*> Imperative::Backward(
       exec::AggregateGradient, nullptr, nullptr,
       zero_ops, "_copy");
   CHECK_EQ(g_graph.outputs.size(), xs.size());
-  for (const auto &e : g_graph.outputs) {
+  for (const auto& e : g_graph.outputs) {
     if (e.node->op() == nullptr) {
       auto node = Node::Create();
       node->attrs.op = copy_op;
@@ -375,7 +375,9 @@ std::vector<NDArray*> Imperative::Backward(
   std::vector<OpStatePtr> states;
   std::vector<NDArray*> arrays;
   arrays.reserve(buff.size());
-  for (size_t i = 0; i < buff.size(); ++i) arrays.push_back(&buff[i]);
+  for (auto& buffered_array : buff) {
+    arrays.push_back(&buffered_array);
+  }
   if (create_graph) {
     states.resize(num_forward_nodes);
     nnvm::DFSVisit(sym.outputs, [&](const nnvm::NodePtr& n) {
@@ -390,12 +392,12 @@ std::vector<NDArray*> Imperative::Backward(
         ref_count[eid] = 1;
       }
     });
-    for (size_t i = 0; i < ograd_entries.size(); ++i) {
-      AGInfo& info = AGInfo::Get(ograd_entries[i].node);
-      if (!idx.exist(ograd_entries[i].node.get())) continue;
-      size_t eid = idx.entry_id(ograd_entries[i]);
+    for (auto& ograd_entry : ograd_entries) {
+      AGInfo& info = AGInfo::Get(ograd_entry.node);
+      if (!idx.exist(ograd_entry.node.get())) continue;
+      size_t eid = idx.entry_id(ograd_entry);
       buff[eid] = info.outputs[0];
-      buff[eid].entry_ = ograd_entries[i];
+      buff[eid].entry_ = ograd_entry;
     }
   } else {
     states.reserve(num_forward_nodes);
@@ -409,10 +411,10 @@ std::vector<NDArray*> Imperative::Backward(
         if (retain_graph || info.grad_req != kNullOp) ref_count[eid] = 1;
       }
     }
-    for (size_t i = 0; i < ograd_entries.size(); ++i) {
-      if (!idx.exist(ograd_entries[i].node.get())) continue;
-      AGInfo& info = AGInfo::Get(ograd_entries[i].node);
-      arrays[idx.entry_id(ograd_entries[i])] = &info.outputs[0];
+    for (auto& ograd_entry : ograd_entries) {
+      if (!idx.exist(ograd_entry.node.get())) continue;
+      AGInfo& info = AGInfo::Get(ograd_entry.node);
+      arrays[idx.entry_id(ograd_entry)] = &info.outputs[0];
     }
   }
   for (size_t i = num_forward_outputs; i < graph.outputs.size(); ++i) {
6 changes: 3 additions & 3 deletions src/imperative/imperative_utils.cc
@@ -79,9 +79,9 @@ void RunGraph(
       arg_dtypes.clear();
       arg_shapes.reserve(ndinputs.size());
       arg_dtypes.reserve(ndinputs.size());
-      for (size_t i = 0; i < ndinputs.size(); ++i) {
-        arg_shapes.emplace_back(ndinputs[i]->shape());
-        arg_dtypes.emplace_back(ndinputs[i]->dtype());
+      for (auto& ndinput : ndinputs) {
+        arg_shapes.emplace_back(ndinput->shape());
+        arg_dtypes.emplace_back(ndinput->dtype());
       }
       states[i] = createop[node.source->op()](
           node.source->attrs, ctx, arg_shapes, arg_dtypes);
14 changes: 7 additions & 7 deletions src/io/image_aug_default.cc
@@ -214,9 +214,9 @@ class DefaultImageAugmenter : public ImageAugmenter {
   void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) override {
     std::vector<std::pair<std::string, std::string> > kwargs_left;
     kwargs_left = param_.InitAllowUnknown(kwargs);
-    for (size_t i = 0; i < kwargs_left.size(); i++) {
-      if (!strcmp(kwargs_left[i].first.c_str(), "rotate_list")) {
-        const char* val = kwargs_left[i].second.c_str();
+    for (auto& kwarg : kwargs_left) {
+      if (!strcmp(kwarg.first.c_str(), "rotate_list")) {
+        const char* val = kwarg.second.c_str();
         const char *end = val + strlen(val);
         char buf[128];
         while (val < end) {
@@ -472,18 +472,18 @@ class DefaultImageAugmenter : public ImageAugmenter {
                                             param_.saturation)(*prnd);
       int rand_order[3] = {0, 1, 2};
       std::random_shuffle(std::begin(rand_order), std::end(rand_order));
-      for (int i = 0; i < 3; ++i) {
-        if (rand_order[i] == 0) {
+      for (int i : rand_order) {
+        if (i == 0) {
           // brightness
           res.convertTo(res, -1, alpha_b, 0);
         }
-        if (rand_order[i] == 1) {
+        if (i == 1) {
           // contrast
           cvtColor(res, temp_, CV_RGB2GRAY);
           float gray_mean = cv::mean(temp_)[0];
           res.convertTo(res, -1, alpha_c, (1 - alpha_c) * gray_mean);
         }
-        if (rand_order[i] == 2) {
+        if (i == 2) {
           // saturation
           cvtColor(res, temp_, CV_RGB2GRAY);
           cvtColor(temp_, temp_, CV_GRAY2BGR);
20 changes: 10 additions & 10 deletions src/io/image_det_aug_default.cc
@@ -349,19 +349,19 @@ class ImageDetLabel {
     if (!valid) return false;
     // transform ground-truth labels
     std::vector<ImageDetObject> new_objects;
-    for (auto iter = objects_.begin(); iter != objects_.end(); ++iter) {
+    for (auto& object : objects_) {
       if (image_det_aug_default_enum::kCenter == crop_emit_mode) {
-        float center_x = (iter->left + iter->right) * 0.5f;
-        float center_y = (iter->top + iter->bottom) * 0.5f;
+        float center_x = (object.left + object.right) * 0.5f;
+        float center_y = (object.top + object.bottom) * 0.5f;
         if (!crop_box.contains(cv::Point2f(center_x, center_y))) {
           continue;
         }
-        new_objects.push_back(iter->Project(crop_box));
+        new_objects.push_back(object.Project(crop_box));
       } else if (image_det_aug_default_enum::kOverlap == crop_emit_mode) {
-        Rect gt_box = iter->ToRect();
+        Rect gt_box = object.ToRect();
         float overlap = (crop_box & gt_box).area() / gt_box.area();
         if (overlap > emit_overlap_thresh) {
-          new_objects.push_back(iter->Project(crop_box));
+          new_objects.push_back(object.Project(crop_box));
         }
       }
     }
@@ -375,17 +375,17 @@ class ImageDetLabel {
    */
   bool TryPad(const Rect pad_box) {
     // update all objects inplace
-    for (auto it = objects_.begin(); it != objects_.end(); ++it) {
-      *it = it->Project(pad_box);
+    for (auto& object : objects_) {
+      object = object.Project(pad_box);
     }
     return true;
   }
 
   /*! \brief flip image and object coordinates horizontally */
   bool TryMirror() {
     // flip all objects horizontally
-    for (auto it = objects_.begin(); it != objects_.end(); ++it) {
-      *it = it->HorizontalFlip();
+    for (auto& object : objects_) {
+      object = object.HorizontalFlip();
    }
     return true;
   }
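The TryPad/TryMirror hunks above show the other common case: the loop writes each element back in place, so the range-based form binds a mutable reference (for (auto& object : objects_)). A minimal sketch of the same rewrite; Box and its HorizontalFlip(width) helper are hypothetical stand-ins, not the actual ImageDetObject API:

#include <vector>

struct Box {
  float left, top, right, bottom;
  // Hypothetical helper: mirror the box inside an image of the given width.
  Box HorizontalFlip(float width) const {
    return {width - right, top, width - left, bottom};
  }
};

int main() {
  std::vector<Box> boxes = {{0.1f, 0.2f, 0.4f, 0.6f}};

  // Iterator form (what the old code did):
  //   for (auto it = boxes.begin(); it != boxes.end(); ++it) *it = it->HorizontalFlip(1.0f);
  // Range-based form: a non-const reference lets the loop assign back in place.
  for (auto& box : boxes) {
    box = box.HorizontalFlip(1.0f);
  }
  return 0;
}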