
rm in_legacy part8 (PaddlePaddle#49386)
* rm legacy layers part6

* rm non_static_mode

* modify non_static

* minor change

* rm loss

* rm in_legacy part8

* minor change
yjjiang11 authored Dec 29, 2022
1 parent 0c52e8a commit 1c7ae95
Showing 11 changed files with 794 additions and 1,168 deletions.
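
The diffs below all follow the same pattern: the extra _in_legacy_dygraph() / _non_static_mode() checks are dropped, and each API keeps a single in_dygraph_mode() branch with the static-graph code moved under an explicit else. A minimal sketch of that control-flow shape (assumes a Paddle install; dygraph_impl and static_impl are placeholders, not names from the diff):

# Sketch only: the dispatch shape this PR converges on.
from paddle.fluid.framework import in_dygraph_mode

def dygraph_impl(x):
    return x  # stands in for a direct _C_ops / _legacy_C_ops call

def static_impl(x):
    return x  # stands in for LayerHelper(...) plus append_op(...)

def some_layer(x):
    if in_dygraph_mode():
        return dygraph_impl(x)
    else:
        return static_impl(x)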
61 changes: 25 additions & 36 deletions python/paddle/fluid/layers/collective.py
@@ -13,7 +13,7 @@
# limitations under the License.

from ..layer_helper import LayerHelper, unique_name
-from ..framework import Variable, in_dygraph_mode, _in_legacy_dygraph
+from ..framework import Variable, in_dygraph_mode
import paddle
from paddle import _C_ops, _legacy_C_ops

@@ -120,42 +120,31 @@ def _c_allgather(x, nranks, ring_id=0, use_calc_stream=False):
task = group.process_group.all_gather(x, out)
task.wait()
return out
-
-    if _in_legacy_dygraph():
-        attrs = (
-            'nranks',
-            nranks,
-            'ring_id',
-            ring_id,
-            'use_calc_stream',
-            use_calc_stream,
+    else:
+        helper = LayerHelper(op_type, **locals())
+        out_shape = list(x.shape[:])
+        if out_shape[0] > 0:
+            out_shape[0] *= nranks
+        out = helper.create_variable(
+            name=unique_name.generate_with_ignorable_key(
+                '.'.join([x.name, op_type])
+            ),
+            shape=out_shape,
+            dtype=x.dtype,
+            type=x.type,
+            persistable=x.persistable,
        )
-        return _legacy_C_ops.c_allgather(x, *attrs)
-
-    helper = LayerHelper(op_type, **locals())
-    out_shape = list(x.shape[:])
-    if out_shape[0] > 0:
-        out_shape[0] *= nranks
-    out = helper.create_variable(
-        name=unique_name.generate_with_ignorable_key(
-            '.'.join([x.name, op_type])
-        ),
-        shape=out_shape,
-        dtype=x.dtype,
-        type=x.type,
-        persistable=x.persistable,
-    )
-    helper.append_op(
-        type=op_type,
-        inputs={'X': [x]},
-        outputs={'Out': [out]},
-        attrs={
-            'nranks': nranks,
-            'ring_id': ring_id,
-            'use_calc_stream': use_calc_stream,
-        },
-    )
-    return out
+        helper.append_op(
+            type=op_type,
+            inputs={'X': [x]},
+            outputs={'Out': [out]},
+            attrs={
+                'nranks': nranks,
+                'ring_id': ring_id,
+                'use_calc_stream': use_calc_stream,
+            },
+        )
+        return out


def _c_reducescatter(x, nranks, ring_id=0, use_calc_stream=False):
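The only shape logic the static branch of _c_allgather carries is the axis-0 scaling by nranks: all_gather stacks the per-rank tensors along the leading dimension. A standalone sketch of that rule (plain Python, not part of the diff; the helper name is made up):

def allgather_out_shape(x_shape, nranks):
    # Mirrors the out_shape computation in _c_allgather above.
    out_shape = list(x_shape)
    if out_shape[0] > 0:  # unknown (-1) leading dims are left untouched
        out_shape[0] *= nranks
    return out_shape

assert allgather_out_shape([8, 16], nranks=4) == [32, 16]
assert allgather_out_shape([-1, 16], nranks=4) == [-1, 16]
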
58 changes: 28 additions & 30 deletions python/paddle/fluid/layers/control_flow.py
@@ -21,9 +21,7 @@
Program,
Variable,
Operator,
-    _non_static_mode,
    static_only,
-    _in_legacy_dygraph,
in_dygraph_mode,
)
from ..layer_helper import LayerHelper, unique_name
Expand Down Expand Up @@ -1154,7 +1152,7 @@ def body(i, ten):
"but given shape as {0}.".format(list(pre_cond.shape))
)

-    if _non_static_mode():
+    if in_dygraph_mode():
now_cond = pre_cond.numpy()[0]
while now_cond:
output_vars = body(*loop_vars)
@@ -1168,33 +1166,33 @@ def body(i, ten):
now_cond = cond(*output_vars).numpy()[0]
map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
return loop_vars
-
-    while_loop_block = While(pre_cond, is_test, name)
-    has_mutable_vars_in_loop = hold_mutable_vars(loop_vars)
-    with while_loop_block.block():
-        # If a variable with mutable type is included in loop_vars, like `dict/list`,
-        # modifying it in the body function will cause origin variable to be modified
-        # synchronously. This will raise an assignment error out of while block.
-        # Here we make a copy of the mutable vars to avoid this problem.
-        if has_mutable_vars_in_loop:
-            new_loop_vars = copy_mutable_vars(loop_vars)
-            output_vars = body(*new_loop_vars)
-        else:
-            output_vars = body(*loop_vars)
-        if not isinstance(output_vars, (list, tuple)):
-            output_vars = [output_vars]
-        try:
-            loop_vars = _deal_with_undefined_var(output_vars, loop_vars)
-            assert_same_structure(output_vars, loop_vars, check_types=False)
-        except ValueError as e:
-            raise ValueError(
-                "body in while_loop should return the same arity "
-                "(length and structure) as loop_vars: {0}".format(e)
-            )
-        now_cond = cond(*output_vars)
-        map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
-        assign(now_cond, pre_cond)
-    return loop_vars
+    else:
+        while_loop_block = While(pre_cond, is_test, name)
+        has_mutable_vars_in_loop = hold_mutable_vars(loop_vars)
+        with while_loop_block.block():
+            # If a variable with mutable type is included in loop_vars, like `dict/list`,
+            # modifying it in the body function will cause origin variable to be modified
+            # synchronously. This will raise an assignment error out of while block.
+            # Here we make a copy of the mutable vars to avoid this problem.
+            if has_mutable_vars_in_loop:
+                new_loop_vars = copy_mutable_vars(loop_vars)
+                output_vars = body(*new_loop_vars)
+            else:
+                output_vars = body(*loop_vars)
+            if not isinstance(output_vars, (list, tuple)):
+                output_vars = [output_vars]
+            try:
+                loop_vars = _deal_with_undefined_var(output_vars, loop_vars)
+                assert_same_structure(output_vars, loop_vars, check_types=False)
+            except ValueError as e:
+                raise ValueError(
+                    "body in while_loop should return the same arity "
+                    "(length and structure) as loop_vars: {0}".format(e)
+                )
+            now_cond = cond(*output_vars)
+            map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
+            assign(now_cond, pre_cond)
+        return loop_vars


# (TODO: Mine) There exists dependency. It will be removed later.
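After this change, while_loop runs the Python loop eagerly under in_dygraph_mode() and only builds a While block in the static-graph else branch. A usage sketch matching the body(i, ten) example named in the hunk headers (assumes Paddle is installed and that the public alias paddle.static.nn.while_loop forwards to this implementation; not taken from the diff):

import paddle

def cond(i, ten):
    return i < ten

def body(i, ten):
    return [i + 1, ten]

i = paddle.full(shape=[1], fill_value=0, dtype='int64')
ten = paddle.full(shape=[1], fill_value=10, dtype='int64')
i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])
print(i)  # a Tensor holding 10
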
18 changes: 8 additions & 10 deletions python/paddle/fluid/layers/layer_function_generator.py
@@ -24,13 +24,10 @@
Variable,
core,
convert_np_dtype_to_dtype_,
-    _non_static_mode,
    in_dygraph_mode,
-    _in_legacy_dygraph,
)
from ..layer_helper import LayerHelper
from ..data_feeder import check_variable_and_dtype
-from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
from paddle import _C_ops, _legacy_C_ops

__all__ = [
@@ -276,7 +273,7 @@ def func(x, name=None):
return op(x)
# TODO(dev): Because some ops' yaml has not been migrated.
# Replace it with _in_legacy_dygraph while all yaml work is done.
-        if _non_static_mode():
+        if in_dygraph_mode() and hasattr(_legacy_C_ops, op_type):
op = getattr(_legacy_C_ops, op_type)
return op(x)

@@ -327,15 +324,16 @@ def generate_inplace_fn(inplace_op_type):
origin_op_type = inplace_op_type[:-1]

def func(x, name=None):
-        if _non_static_mode():
+        if in_dygraph_mode():
            op = getattr(_legacy_C_ops, inplace_op_type)
            return op(x)
-        warnings.warn(
-            "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
-                inplace_op_type, origin_op_type
+        else:
+            warnings.warn(
+                "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
+                    inplace_op_type, origin_op_type
+                )
            )
-        )
-        return generate_activation_fn(origin_op_type)(x, name)
+            return generate_activation_fn(origin_op_type)(x, name)

func.__name__ = inplace_op_type
func.__doc__ = """
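Restated outside the diff, the new generate_inplace_fn behaviour is: call the in-place op in dygraph mode, otherwise warn and fall back to the out-of-place activation. A hypothetical standalone sketch of that dispatch (make_inplace_fn and its arguments are illustrative, not Paddle API):

import warnings

def make_inplace_fn(inplace_op_type, inplace_op, out_of_place_fn, in_dygraph):
    origin_op_type = inplace_op_type[:-1]  # e.g. 'relu_' -> 'relu'

    def func(x, name=None):
        if in_dygraph:
            return inplace_op(x)
        else:
            warnings.warn(
                "In static mode, {}() is the same as {}() and does not "
                "perform inplace operation.".format(
                    inplace_op_type, origin_op_type
                )
            )
            return out_of_place_fn(x, name)

    func.__name__ = inplace_op_type
    return func
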
25 changes: 15 additions & 10 deletions python/paddle/fluid/layers/learning_rate_scheduler.py
@@ -27,9 +27,14 @@
from . import control_flow
from . import nn
from . import tensor
-from ..framework import default_main_program, Parameter, unique_name, name_scope
+from ..framework import (
+    default_main_program,
+    Parameter,
+    unique_name,
+    name_scope,
+    in_dygraph_mode,
+)
from ..framework import Variable
-from ..framework import _non_static_mode
from ..dygraph import learning_rate_scheduler as imperate_lr
from ..data_feeder import check_variable_and_dtype, check_type

@@ -99,7 +104,7 @@ def noam_decay(d_model, warmup_steps, learning_rate=1.0):
learning_rate)
"""
with default_main_program()._lr_schedule_guard():
-        if _non_static_mode():
+        if in_dygraph_mode():
decay = imperate_lr.NoamDecay(
d_model, warmup_steps, learning_rate=learning_rate
)
@@ -160,7 +165,7 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""
with default_main_program()._lr_schedule_guard():
-        if _non_static_mode():
+        if in_dygraph_mode():
decay = imperate_lr.ExponentialDecay(
learning_rate, decay_steps, decay_rate, staircase
)
@@ -222,7 +227,7 @@ def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""
with default_main_program()._lr_schedule_guard():
-        if _non_static_mode():
+        if in_dygraph_mode():
decay = imperate_lr.NaturalExpDecay(
learning_rate, decay_steps, decay_rate, staircase
)
@@ -282,7 +287,7 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
staircase=True))
"""
with default_main_program()._lr_schedule_guard():
-        if _non_static_mode():
+        if in_dygraph_mode():
decay = imperate_lr.InverseTimeDecay(
learning_rate, decay_steps, decay_rate, staircase
)
@@ -337,7 +342,7 @@ def polynomial_decay(
"""
with default_main_program()._lr_schedule_guard():
-        if _non_static_mode():
+        if in_dygraph_mode():
decay = imperate_lr.PolynomialDecay(
learning_rate, decay_steps, end_learning_rate, power, cycle
)
@@ -414,7 +419,7 @@ def piecewise_decay(boundaries, values):
if len(values) - len(boundaries) != 1:
raise ValueError("len(values) - len(boundaries) should be 1")

-        if _non_static_mode():
+        if in_dygraph_mode():
decay = imperate_lr.PiecewiseDecay(boundaries, values, 0)
return decay
else:
@@ -488,7 +493,7 @@ def cosine_decay(learning_rate, step_each_epoch, epochs):
)

with default_main_program()._lr_schedule_guard():
-        if _non_static_mode():
+        if in_dygraph_mode():
decay = imperate_lr.CosineDecay(
learning_rate, step_each_epoch, epochs
)
@@ -569,7 +574,7 @@ def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr):
linear_step = float(end_lr) - float(start_lr)
with default_main_program()._lr_schedule_guard():

-        if _non_static_mode():
+        if in_dygraph_mode():
lr = imperate_lr.LinearLrWarmup(
learning_rate, warmup_steps, start_lr, end_lr
)
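
Every scheduler in this file now branches the same way: under in_dygraph_mode() it returns an imperative LearningRateDecay object from imperate_lr, otherwise it builds the schedule ops inside _lr_schedule_guard(). A usage sketch for noam_decay (assumes Paddle with the legacy fluid.layers schedulers still available; not taken from the diff):

import paddle
import paddle.fluid as fluid

# Dygraph branch: returns an imperate_lr.NoamDecay instance.
paddle.disable_static()
decay = fluid.layers.noam_decay(d_model=512, warmup_steps=4000, learning_rate=1.0)

# Static branch: returns a Variable holding the scheduled learning rate.
paddle.enable_static()
with fluid.program_guard(fluid.Program(), fluid.Program()):
    lr = fluid.layers.noam_decay(d_model=512, warmup_steps=4000, learning_rate=1.0)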
