Rename now_grad_arg to current_grad #5466

Merged · 2 commits · Jul 12, 2021
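This PR renames the temporary-gradient accessor from now_grad_arg to current_grad across the autograd engine, AutogradMeta, the Tensor interface, and the tensor implementations; behavior is unchanged, only the identifier changes. A minimal standalone sketch of what the rename looks like at a call site follows (the TensorArg and AutogradMeta mocks below are simplified stand-ins written for this note, not OneFlow's real classes):

// Simplified mock for illustration only: OneFlow's TensorArg and AutogradMeta
// carry real tensors; here a double-valued buffer stands in for the gradient.
#include <memory>
#include <vector>

struct TensorArg {  // stand-in for the real TensorArg partial-gradient buffer
  bool Empty() const { return partials.empty(); }
  void PushPartialTensor(double grad) { partials.push_back(grad); }
  std::vector<double> partials;
};

class AutogradMeta {
 public:
  AutogradMeta() : current_grad_(new TensorArg) {}  // member was now_grad_arg_
  // Getter was now_grad_arg(); call sites change only the name.
  const std::shared_ptr<TensorArg>& current_grad() const { return current_grad_; }

 private:
  std::shared_ptr<TensorArg> current_grad_;
};

int main() {
  AutogradMeta meta;
  meta.current_grad()->PushPartialTensor(1.0);  // previously meta.now_grad_arg()->...
  return meta.current_grad()->Empty() ? 1 : 0;  // exits 0: the partial grad was recorded
}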
42 changes: 21 additions & 21 deletions oneflow/core/autograd/autograd_engine.cpp
@@ -32,24 +32,24 @@ namespace {
 bool IsReadyToRun(const std::vector<std::shared_ptr<AutogradMeta>>& out_meta_datas) {
   return std::any_of(out_meta_datas.begin(), out_meta_datas.end(),
                      [](const std::shared_ptr<AutogradMeta>& meta_data) {
-                       return !meta_data->now_grad_arg()->Empty();
+                       return !meta_data->current_grad()->Empty();
                      });
 }
 
 Maybe<void> CopyOrAccGrad(AutogradMeta* autograd_meta, bool autograd_mode) {
   autograd::AutoGradMode mode(autograd_mode);
-  auto now_grad = JUST(autograd_meta->now_grad_arg()->GetAccTensor());
-  if (!now_grad) { return Maybe<void>::Ok(); }
+  auto current_grad = JUST(autograd_meta->current_grad()->GetAccTensor());
+  if (!current_grad) { return Maybe<void>::Ok(); }
   for (const auto& hook : autograd_meta->hooks()) {
-    auto new_grad = hook(now_grad);
-    if (new_grad) { now_grad = new_grad; }
+    auto new_grad = hook(current_grad);
+    if (new_grad) { current_grad = new_grad; }
   }
   if (autograd_meta->acc_grad()) {
     const auto& output =
-        JUST(functional::Add(autograd_meta->acc_grad(), now_grad, /*inplace=*/true));
+        JUST(functional::Add(autograd_meta->acc_grad(), current_grad, /*inplace=*/true));
     autograd_meta->set_acc_grad(output);
   } else {
-    autograd_meta->set_acc_grad(now_grad);
+    autograd_meta->set_acc_grad(current_grad);
   }
   return Maybe<void>::Ok();
 }
@@ -103,7 +103,7 @@ Maybe<void> FunctionNode::AccGrad4LeafTensor(bool create_graph) {
 
 void FunctionNode::ReleaseOutTensorArgs() {
   for (const std::shared_ptr<AutogradMeta>& meta_data : output_meta_datas_) {
-    meta_data->now_grad_arg()->Release();
+    meta_data->current_grad()->Release();
   }
 }
 
@@ -121,10 +121,10 @@ Maybe<bool> FunctionNode::Apply(bool create_graph) {
   TensorTuple input_grads(input_meta_datas_.size());
   TensorTuple output_grads(output_meta_datas_.size());
   for (int i = 0; i < output_meta_datas_.size(); ++i) {
-    if (output_meta_datas_.at(i)->now_grad_arg()->Empty()) {
+    if (output_meta_datas_.at(i)->current_grad()->Empty()) {
       output_grads.at(i) = JUST(output_tensor_infos_.at(i).zeros());
     } else {
-      output_grads.at(i) = JUST(output_meta_datas_.at(i)->now_grad_arg()->GetAccTensor());
+      output_grads.at(i) = JUST(output_meta_datas_.at(i)->current_grad()->GetAccTensor());
     }
   }
   JUST((*backward_fn_)(output_grads, &input_grads, create_graph));
@@ -135,7 +135,7 @@ Maybe<bool> FunctionNode::Apply(bool create_graph) {
           << " calculate grad for tensor which requires_grad is False. Please submit an issue in "
              "`https://github.com/Oneflow-Inc/oneflow/issues` and we will fix it as soon as "
              "possiable";
-      JUST(input_meta_datas_.at(i)->now_grad_arg()->PushPartialTensor(input_grads.at(i)));
+      JUST(input_meta_datas_.at(i)->current_grad()->PushPartialTensor(input_grads.at(i)));
     }
   }
   return true;
@@ -157,7 +157,7 @@ Maybe<void> StackAutogradEngine::RunBackwardAndSaveGrads4LeafTensor(const Tensor
                                                                     bool create_graph) {
   ClearReleasedFunctionNodes();
   for (int i = 0; i < outputs.size(); ++i) {
-    JUST(JUST(outputs.at(i)->now_grad_arg())->PushPartialTensor(out_grads.at(i)));
+    JUST(JUST(outputs.at(i)->current_grad())->PushPartialTensor(out_grads.at(i)));
   }
   // Runs each FunctionNode
   for (const auto& weak_func_node : node_list_) {
@@ -178,14 +178,14 @@ Maybe<TensorTuple> StackAutogradEngine::RunBackwardAndReturnInputsTensorGrad(
     const TensorTuple& outputs, const TensorTuple& inputs, const TensorTuple& out_grads,
     bool retain_graph, bool create_graph) {
   ClearReleasedFunctionNodes();
-  std::shared_ptr<TensorTuple> input_now_grads = std::make_shared<TensorTuple>(inputs.size());
+  std::shared_ptr<TensorTuple> input_current_grad = std::make_shared<TensorTuple>(inputs.size());
   std::vector<bool> ori_retain_grad(inputs.size());
   for (int i = 0; i < inputs.size(); ++i) {
     ori_retain_grad.at(i) = inputs.at(i)->retain_grad();
     JUST(inputs.at(i)->set_retain_grad(true));
   }
   for (int i = 0; i < outputs.size(); ++i) {
-    JUST(JUST(outputs.at(i)->now_grad_arg())->PushPartialTensor(out_grads.at(i)));
+    JUST(JUST(outputs.at(i)->current_grad())->PushPartialTensor(out_grads.at(i)));
   }
   // Runs each FunctionNode
   for (const auto& weak_func_node : node_list_) {
@@ -199,14 +199,14 @@ Maybe<TensorTuple> StackAutogradEngine::RunBackwardAndReturnInputsTensorGrad(
   }
   // Gets input grads and resume retain_grad
   for (int i = 0; i < inputs.size(); ++i) {
-    input_now_grads->at(i) = JUST(inputs.at(i)->acc_grad());
+    input_current_grad->at(i) = JUST(inputs.at(i)->acc_grad());
     if (!ori_retain_grad.at(i)) {
       JUST(inputs.at(i)->set_acc_grad(nullptr));
       JUST(inputs.at(i)->set_retain_grad(false));
     }
   }
   if (!retain_graph) { ClearEngine(); }
-  return input_now_grads;
+  return input_current_grad;
 }
 
 Maybe<FunctionNode> StackAutogradEngine::AddBackwardFuncPtr(
@@ -385,7 +385,7 @@ Maybe<void> GraphAutogradEngine::RunBackwardAndSaveGrads4LeafTensor(const Tensor
                                                                     bool retain_graph,
                                                                     bool create_graph) {
   for (int i = 0; i < outputs.size(); ++i) {
-    JUST(JUST(outputs.at(i)->now_grad_arg())->PushPartialTensor(out_grads.at(i)));
+    JUST(JUST(outputs.at(i)->current_grad())->PushPartialTensor(out_grads.at(i)));
   }
   GraphTask graph_task(outputs, retain_graph, create_graph);
   JUST(graph_task.ComputeDependencies());
@@ -396,29 +396,29 @@ Maybe<void> GraphAutogradEngine::RunBackwardAndSaveGrads4LeafTensor(const Tensor
 Maybe<TensorTuple> GraphAutogradEngine::RunBackwardAndReturnInputsTensorGrad(
     const TensorTuple& outputs, const TensorTuple& inputs, const TensorTuple& out_grads,
     bool retain_graph, bool create_graph) {
-  std::shared_ptr<TensorTuple> input_now_grads = std::make_shared<TensorTuple>(inputs.size());
+  std::shared_ptr<TensorTuple> input_current_grad = std::make_shared<TensorTuple>(inputs.size());
   GraphTask graph_task(outputs, retain_graph, create_graph);
   std::vector<bool> ori_retain_grad(inputs.size());
   for (int i = 0; i < inputs.size(); ++i) {
     ori_retain_grad.at(i) = inputs.at(i)->retain_grad();
     JUST(inputs.at(i)->set_retain_grad(true));
   }
   for (int i = 0; i < outputs.size(); ++i) {
-    JUST(JUST(outputs.at(i)->now_grad_arg())->PushPartialTensor(out_grads.at(i)));
+    JUST(JUST(outputs.at(i)->current_grad())->PushPartialTensor(out_grads.at(i)));
   }
 
   JUST(graph_task.ComputeDependenciesAndPruneNode(inputs));
   JUST(graph_task.Apply(/*save_grad_for_leaf=*/false));
 
   // Gets input grads and resume retain_grad
   for (int i = 0; i < inputs.size(); ++i) {
-    input_now_grads->at(i) = JUST(inputs.at(i)->acc_grad());
+    input_current_grad->at(i) = JUST(inputs.at(i)->acc_grad());
     if (!ori_retain_grad.at(i)) {
       JUST(inputs.at(i)->set_acc_grad(nullptr));
       JUST(inputs.at(i)->set_retain_grad(false));
     }
   }
-  return input_now_grads;
+  return input_current_grad;
 }
 
 Maybe<FunctionNode> GraphAutogradEngine::AddBackwardFuncPtr(
6 changes: 3 additions & 3 deletions oneflow/core/autograd/autograd_meta.h
@@ -39,11 +39,11 @@ class AutogradMeta final {
       : is_leaf_(is_leaf),
         requires_grad_(requires_grad),
         retain_grad_(false),
-        now_grad_arg_(new TensorArg) {}
+        current_grad_(new TensorArg) {}
 
   // Getters
   const std::shared_ptr<Tensor>& acc_grad() const { return acc_grad_; }
-  const std::shared_ptr<TensorArg>& now_grad_arg() const { return now_grad_arg_; }
+  const std::shared_ptr<TensorArg>& current_grad() const { return current_grad_; }
   bool requires_grad() const { return requires_grad_; }
   bool is_leaf() const { return is_leaf_; }
   bool retain_grad() const { return retain_grad_; }
@@ -68,7 +68,7 @@ class AutogradMeta final {
   bool retain_grad_;
 
   std::shared_ptr<Tensor> acc_grad_;
-  std::shared_ptr<TensorArg> now_grad_arg_;
+  std::shared_ptr<TensorArg> current_grad_;
   std::vector<Hook> hooks_;
 };
 
8 changes: 4 additions & 4 deletions oneflow/core/framework/tensor.h
@@ -82,7 +82,7 @@ class Tensor {
   virtual bool retain_grad() const = 0;
   virtual std::shared_ptr<const FunctionNode> grad_fn_node() const = 0;
   virtual Maybe<Tensor> acc_grad() const = 0;
-  virtual Maybe<TensorArg> now_grad_arg() const = 0;
+  virtual Maybe<TensorArg> current_grad() const = 0;
   virtual Maybe<Tensor> detach() const = 0;
   virtual Maybe<Tensor> clone() const = 0;
   virtual std::shared_ptr<Tensor> data() const = 0;
@@ -116,7 +116,7 @@ class TensorIf : public Tensor {
 
   // Getters for autograd
   // acc_grad is tensor's accumulated grad in more than once backward operation,
-  // and now_grad_arg is temporary grad to shared data with different FunctionNode
+  // and current_grad is temporary grad to shared data with different FunctionNode
   std::shared_ptr<const FunctionNode> grad_fn_node() const override { return grad_fn_node_; }
 
   // Setters for autograd
@@ -168,7 +168,7 @@ class MirroredTensor final : public TensorIf<MirroredTensor>,
 
   // Getters for autograd
   Maybe<Tensor> acc_grad() const override { return impl_->acc_grad(); }
-  Maybe<TensorArg> now_grad_arg() const override { return impl_->now_grad_arg(); }
+  Maybe<TensorArg> current_grad() const override { return impl_->current_grad(); }
   bool requires_grad() const override { return impl_->requires_grad(); }
   bool is_leaf() const override { return impl_->is_leaf(); }
   bool retain_grad() const override { return impl_->retain_grad(); }
@@ -261,7 +261,7 @@ class ConsistentTensor final : public TensorIf<ConsistentTensor> {
 
   // Getters for autograd
   Maybe<Tensor> acc_grad() const override { return impl_->acc_grad(); }
-  Maybe<TensorArg> now_grad_arg() const override { return impl_->now_grad_arg(); }
+  Maybe<TensorArg> current_grad() const override { return impl_->current_grad(); }
   bool requires_grad() const override { return impl_->requires_grad(); }
   bool is_leaf() const override { return impl_->is_leaf(); }
   bool retain_grad() const override { return impl_->retain_grad(); }
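The tensor.h comment above distinguishes acc_grad, the gradient accumulated across repeated backward passes, from current_grad, a temporary buffer into which FunctionNodes push partial gradients during a single pass. Below is a simplified, standalone sketch of that accumulate-or-set step, mirroring the CopyOrAccGrad hunk earlier in this diff; the Meta struct and free function are illustrative stand-ins written for this note (doubles instead of tensors), not OneFlow API:

// Illustrative sketch only: doubles stand in for tensors, and hooks/error
// handling are reduced to the bare pattern used by CopyOrAccGrad.
#include <functional>
#include <numeric>
#include <optional>
#include <vector>

struct Meta {
  std::optional<double> acc_grad;                    // kept across backward() calls
  std::vector<double> current_grad;                  // partial grads of the current pass only
  std::vector<std::function<double(double)>> hooks;  // user-registered gradient hooks
};

void CopyOrAccGrad(Meta& meta) {
  if (meta.current_grad.empty()) { return; }  // no gradient produced this pass
  // Sum the partial gradients pushed by the FunctionNodes of this pass.
  double grad = std::accumulate(meta.current_grad.begin(), meta.current_grad.end(), 0.0);
  for (const auto& hook : meta.hooks) { grad = hook(grad); }  // hooks may rewrite the grad
  // Add into the accumulated grad if it exists, otherwise take ownership of it.
  meta.acc_grad = meta.acc_grad ? *meta.acc_grad + grad : grad;
  meta.current_grad.clear();  // the engine releases the temporary buffer afterwards
}

Keeping the two fields separate is what lets the accumulated gradient survive across passes while each backward pass starts from an empty current_grad buffer.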
4 changes: 2 additions & 2 deletions oneflow/core/framework/tensor_impl.cpp
@@ -37,9 +37,9 @@ Maybe<Tensor> TensorImpl::acc_grad() const {
   return autograd_meta_->acc_grad();
 }
 
-Maybe<TensorArg> TensorImpl::now_grad_arg() const {
+Maybe<TensorArg> TensorImpl::current_grad() const {
   CHECK_NOTNULL_OR_RETURN(autograd_meta_);
-  return autograd_meta_->now_grad_arg();
+  return autograd_meta_->current_grad();
 }
 
 Maybe<void> TensorImpl::set_acc_grad(const std::shared_ptr<Tensor>& grad) {
2 changes: 1 addition & 1 deletion oneflow/core/framework/tensor_impl.h
@@ -66,7 +66,7 @@ class TensorImpl {
 
   // Getters for autograd
   Maybe<Tensor> acc_grad() const;
-  Maybe<TensorArg> now_grad_arg() const;
+  Maybe<TensorArg> current_grad() const;
   bool requires_grad() const { return requires_grad_; }
   bool is_leaf() const { return is_leaf_; }
   bool retain_grad() const { return autograd_meta_->retain_grad(); }