rm legacy dygraph part7 (PaddlePaddle#49285)
* rm legacy dygraph part7

* rm non_static_mode

* modify

* modify

* add static test

* set static for lstm_cudnn test

* reset tracer

* reset varbase

* fix
yjjiang11 authored Dec 29, 2022
1 parent e81883e commit df3f74d
Showing 12 changed files with 90 additions and 171 deletions.
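
For readers skimming the diffs below, the recurring change is mechanical: the legacy checks `_non_static_mode()` and `_in_legacy_dygraph()` are removed, leaving the unified `in_dygraph_mode()` check and the eager `_C_ops` call path. As a minimal, illustrative sketch of the equivalent public check (not part of this diff; `paddle.in_dynamic_mode()` is the public counterpart of the internal helper):

```python
import paddle

# Eager (dygraph) execution is the default in Paddle 2.x.
paddle.disable_static()
print(paddle.in_dynamic_mode())  # True -> code takes the eager _C_ops path

# Static-graph mode: ops are appended to a Program instead of executed.
paddle.enable_static()
print(paddle.in_dynamic_mode())  # False -> code takes the append_op path
```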
1 change: 0 additions & 1 deletion python/paddle/fluid/dygraph/amp/auto_cast.py
@@ -20,7 +20,6 @@
import contextlib
from paddle.fluid.framework import (
Variable,
_non_static_mode,
OpProtoHolder,
Parameter,
_dygraph_tracer,
9 changes: 2 additions & 7 deletions python/paddle/fluid/dygraph/base.py
@@ -27,7 +27,6 @@
import warnings
from ..framework import (
_get_paddle_place,
_in_legacy_dygraph,
_in_eager_without_dygraph_check,
)
import paddle
@@ -113,11 +112,7 @@ def program_desc_tracing_guard(enable):
@signature_safe_contextmanager
def param_guard(parameters):
# Note: parameters is a reference of self._parameters or self._buffers
if (
in_declarative_mode()
and not framework._non_static_mode()
and parameters
):
if in_declarative_mode() and not framework.in_dygraph_mode() and parameters:
origin_parameters = parameters.copy()
for name, var_base in parameters.items():
if isinstance(var_base, list):
@@ -189,7 +184,7 @@ def enabled():
print(fluid.dygraph.enabled()) # False
"""
# TODO(jiabin): Make this check as in_dygraph_mode when we support default eager mode.
return framework._non_static_mode()
return framework.in_dygraph_mode()


def enable_dygraph(place=None):
1 change: 0 additions & 1 deletion python/paddle/fluid/dygraph/checkpoint.py
@@ -18,7 +18,6 @@
from ..framework import (
Variable,
default_main_program,
_non_static_mode,
dygraph_only,
Parameter,
ParamBase,
6 changes: 3 additions & 3 deletions python/paddle/fluid/dygraph/layer_hooks.py
@@ -14,7 +14,7 @@

import warnings

from paddle.fluid.framework import default_main_program, _non_static_mode
from paddle.fluid.framework import default_main_program, in_dygraph_mode


class LayerOpsRecoder:
@@ -34,7 +34,7 @@ def record_program_ops_pre_hook(layer, inputs):
"""
A pre-hook to mark op numbers before enter layer.forward.
"""
if not _non_static_mode():
if not in_dygraph_mode():
if layer._op_recorder.start < 0:
layer._op_recorder.start = len(
default_main_program().current_block().ops
@@ -55,7 +55,7 @@ def set_op_customized_attrs_post_hook(layer, inputs, outputs):
"""
A post-hook to append customized attributes into all operators generated in current layer.
"""
if not _non_static_mode() and layer._op_recorder.is_valid:
if not in_dygraph_mode() and layer._op_recorder.is_valid:

start = layer._op_recorder.start
end = len(default_main_program().current_block().ops)
4 changes: 2 additions & 2 deletions python/paddle/fluid/dygraph/layer_object_helper.py
@@ -13,7 +13,7 @@
# limitations under the License.

import copy
from ..framework import Parameter, _non_static_mode, _global_flags
from ..framework import Parameter, in_dygraph_mode, _global_flags
from ..param_attr import ParamAttr
from .. import core

@@ -169,7 +169,7 @@ def append_activation(self, input_var, act=None, use_cudnn=None):
if (use_mkldnn is not None) and use_mkldnn:
act['use_mkldnn'] = use_mkldnn
act_type = act.pop('type')
if _non_static_mode():
if in_dygraph_mode():
res = _append_activation_in_dygraph(
input_var, act_type, use_cudnn, use_mkldnn
)
9 changes: 4 additions & 5 deletions python/paddle/fluid/dygraph/layers.py
@@ -46,7 +46,6 @@
from ..param_attr import ParamAttr
from paddle.fluid.executor import Executor, global_scope
from paddle.fluid.framework import (
_non_static_mode,
convert_np_dtype_to_dtype_,
in_dygraph_mode,
)
@@ -153,7 +152,7 @@ def __init__(self, name_scope=None, dtype="float32"):
self._helper = LayerObjectHelper(self._full_name)
self._built = False
self._dtype = dtype
self._init_in_dynamic_mode = framework._non_static_mode()
self._init_in_dynamic_mode = in_dygraph_mode()

self._parameters = collections.OrderedDict()
# Buffers the variable (not parameter) created in layer
@@ -211,7 +210,7 @@ def forward(self, input):
# global setting in dygraph
# NOTE(chenweihang): nn.Layer also can be used in static mode,
# but _dygraph_tracer() can not be called in static mode
if _non_static_mode():
if in_dygraph_mode():
framework._dygraph_tracer().train_mode()
# Layer-level setting
self.training = True
@@ -252,7 +251,7 @@ def forward(self, input):
# global setting in dygraph
# NOTE(chenweihang): nn.Layer also can be used in static mode,
# but _dygraph_tracer() can not be called in static mode
if _non_static_mode():
if in_dygraph_mode():
framework._dygraph_tracer().eval_mode()
# Layer-level setting
self.training = False
@@ -1667,7 +1666,7 @@ def _check_match(key, param):
for key in state_dict.keys():
if key not in match_keys:
unexpected_keys.append(key)
if _non_static_mode():
if in_dygraph_mode():
for param, state in matched_param_state:
param.set_value(state)
else:
14 changes: 3 additions & 11 deletions python/paddle/fluid/dygraph/math_op_patch.py
@@ -17,7 +17,6 @@
Variable,
convert_np_dtype_to_dtype_,
_varbase_creator,
_in_legacy_dygraph,
in_dygraph_mode,
)
from ..layers.layer_function_generator import OpProtoHolder
@@ -123,17 +122,13 @@ def astype(self, dtype):
"""
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)

if _in_legacy_dygraph():
return _legacy_C_ops.cast(
self, 'in_dtype', self.dtype, 'out_dtype', dtype
)
return _C_ops.cast(self, dtype)

def _scalar_elementwise_op_(var, scale, bias):
if framework.in_dygraph_mode():
return _C_ops.scale(var, float(scale), bias, True)
return _legacy_C_ops.scale(var, 'scale', scale, 'bias', bias)
else:
return _legacy_C_ops.scale(var, 'scale', scale, 'bias', bias)

def _neg_(var):
return _scalar_elementwise_op_(var, -1.0, 0.0)
@@ -194,10 +189,7 @@ def _T_(var):
perm = []
for i in range(len(var.shape)):
perm.insert(0, i)
if _in_legacy_dygraph():
out, _ = _legacy_C_ops.transpose2(var, 'axis', perm)
else:
out = _C_ops.transpose(var, perm)
out = _C_ops.transpose(var, perm)
return out

def _scalar_add_(var, value):
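
With the legacy branch gone, `Tensor.astype` above dispatches straight to `_C_ops.cast` in eager mode. A quick usage sketch through the public API (illustrative only, assuming a Paddle 2.x install):

```python
import paddle

x = paddle.ones([2, 3], dtype='float32')
y = x.astype('int64')   # in eager mode this now goes directly to _C_ops.cast
print(y.dtype)          # paddle.int64
```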
174 changes: 69 additions & 105 deletions python/paddle/fluid/dygraph/nn.py
@@ -20,15 +20,13 @@
from . import layers
from ..framework import (
Variable,
_non_static_mode,
OpProtoHolder,
Parameter,
_dygraph_tracer,
_varbase_creator,
default_main_program,
_global_flags,
in_dygraph_mode,
_in_legacy_dygraph,
)

from ..data_feeder import (
@@ -247,115 +245,81 @@ def forward(self, input):
# variance and variance out share the same memory
variance_out = self._variance

if _non_static_mode():
if in_dygraph_mode():
batch_norm_out, t1, t2, t3, t4, _ = _C_ops.batch_norm(
input,
self._mean,
self._variance,
self.weight,
self.bias,
not self.training,
self._momentum,
self._epsilon,
self._data_layout,
self._use_global_stats,
self._trainable_statistics,
)
return dygraph_utils._append_activation_in_dygraph(
batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn
)

elif _in_legacy_dygraph():
attrs = (
"momentum",
self._momentum,
"epsilon",
self._epsilon,
"is_test",
not self.training,
"data_layout",
self._data_layout,
"use_mkldnn",
self._use_mkldnn,
"fuse_with_relu",
self._fuse_with_relu,
"use_global_stats",
self._use_global_stats,
'trainable_statistics',
self._trainable_statistics,
)
batch_norm_out, _, _, _, _, _ = _legacy_C_ops.batch_norm(
input,
self.weight,
self.bias,
self._mean,
self._variance,
None,
mean_out,
variance_out,
*attrs
)

if in_dygraph_mode():
batch_norm_out, t1, t2, t3, t4, _ = _C_ops.batch_norm(
input,
self._mean,
self._variance,
self.weight,
self.bias,
not self.training,
self._momentum,
self._epsilon,
self._data_layout,
self._use_global_stats,
self._trainable_statistics,
)
return dygraph_utils._append_activation_in_dygraph(
batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn
)
else:
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
)

check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
)

attrs = {
"momentum": self._momentum,
"epsilon": self._epsilon,
"is_test": self._is_test,
"data_layout": self._data_layout,
"use_mkldnn": False,
"fuse_with_relu": self._fuse_with_relu,
"use_global_stats": self._use_global_stats,
"trainable_statistics": self._trainable_statistics,
}

inputs = {
"X": [input],
"Scale": [self.weight],
"Bias": [self.bias],
"Mean": [self._mean],
"Variance": [self._variance],
}

saved_mean = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True
)
saved_variance = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True
)
reserve_space = self._helper.create_variable_for_type_inference(
dtype=self._helper.input_dtype(input), stop_gradient=True
)

batch_norm_out = (
input
if self._in_place
else self._helper.create_variable_for_type_inference(self._dtype)
)
attrs = {
"momentum": self._momentum,
"epsilon": self._epsilon,
"is_test": self._is_test,
"data_layout": self._data_layout,
"use_mkldnn": False,
"fuse_with_relu": self._fuse_with_relu,
"use_global_stats": self._use_global_stats,
"trainable_statistics": self._trainable_statistics,
}

inputs = {
"X": [input],
"Scale": [self.weight],
"Bias": [self.bias],
"Mean": [self._mean],
"Variance": [self._variance],
}

saved_mean = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True
)
saved_variance = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True
)
reserve_space = self._helper.create_variable_for_type_inference(
dtype=self._helper.input_dtype(input), stop_gradient=True
)

outputs = {
"Y": [batch_norm_out],
"MeanOut": [mean_out],
"VarianceOut": [variance_out],
"SavedMean": [saved_mean],
"SavedVariance": [saved_variance],
}
if reserve_space is not None:
outputs["ReserveSpace"] = [reserve_space]
batch_norm_out = (
input
if self._in_place
else self._helper.create_variable_for_type_inference(
self._dtype
)
)

self._helper.append_op(
type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs
)
outputs = {
"Y": [batch_norm_out],
"MeanOut": [mean_out],
"VarianceOut": [variance_out],
"SavedMean": [saved_mean],
"SavedVariance": [saved_variance],
}
if reserve_space is not None:
outputs["ReserveSpace"] = [reserve_space]

self._helper.append_op(
type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs
)

# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(batch_norm_out, self._act)
# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(batch_norm_out, self._act)


class RowConv(layers.Layer):
@@ -410,7 +374,7 @@ def __init__(
self, name_scope, future_context_size, param_attr=None, act=None
):
assert (
not _non_static_mode()
not in_dygraph_mode()
), "RowConv is not supported by dynamic graph mode yet!"
super().__init__(name_scope)
self._act = act
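
The BatchNorm forward above keeps two paths: the eager `_C_ops.batch_norm` call and the static-graph `append_op` fallback. A small usage sketch of the dygraph path, using the legacy `paddle.fluid.dygraph.BatchNorm` layer this file defines (illustrative only; the legacy `fluid` namespace is assumed to still be available at this commit):

```python
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(
        np.random.rand(4, 8, 16, 16).astype('float32')
    )
    bn = fluid.dygraph.BatchNorm(num_channels=8)
    y = bn(x)       # takes the in_dygraph_mode() branch -> _C_ops.batch_norm
    print(y.shape)  # [4, 8, 16, 16]
```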