Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 0 additions & 46 deletions python/paddle/fluid/layers/nn.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,6 @@
'autoincreased_step_counter',
'unsqueeze',
'lod_reset',
'relu',
'clip',
'clip_by_norm',
'mul',
Expand Down Expand Up @@ -2074,51 +2073,6 @@ def lod_reset(x, y=None, target_lod=None):
return out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu")
def relu(x, name=None):
    """
    ${comment}

    Args:
        x(Variable): ${x_comment}
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Variable: ${out_comment}

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            in1 = np.array([[-1,0],[1,2.6]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.relu(x1)
                print(out1.numpy())
                # [[0. 0. ]
                #  [1. 2.6]]"""

    # Eager-mode fast paths: dispatch directly to the C++ relu kernel,
    # bypassing static-graph program construction entirely.
    if in_dygraph_mode():
        return _C_ops.relu(x)
    if _in_legacy_dygraph():
        return _legacy_C_ops.relu(x)

    # Static-graph path: validate the input dtype up front so the error
    # surfaces at graph-build time rather than at execution time.
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')

    # NOTE: `inputs` looks unused, but `LayerHelper('relu', **locals())`
    # captures every local by name — do not rename or remove it.
    inputs = {'X': [x]}
    helper = LayerHelper('relu', **locals())
    dtype = helper.input_dtype(input_param_name='x')
    # Output variable inherits the input's dtype; shape is inferred by the op.
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="relu", inputs={"X": helper.input('x')}, outputs={"Out": out}
    )
    return out


def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
if _non_static_mode():
op = getattr(_legacy_C_ops, op_name)
Expand Down
2 changes: 1 addition & 1 deletion python/paddle/fluid/reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -1810,7 +1810,7 @@ def reader():
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
place)
for image, label in py_reader():
relu = fluid.layers.relu(image)
relu = paddle.nn.functional.relu(image)
"""

def __init__(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ def dyfunc_with_if_else2(x, col=100):
# col = -1
col = fluid.layers.fill_constant(shape=[1], value=-1, dtype="int64")
if paddle.mean(x).numpy()[0] > x.numpy()[row][col]:
y = fluid.layers.relu(x)
y = paddle.nn.functional.relu(x)
else:
x_pow = paddle.pow(x, 2)
y = paddle.tanh(x_pow)
Expand Down Expand Up @@ -163,7 +163,7 @@ def nested_if_else(x_v):
w = fluid.layers.fill_constant([feat_size], dtype='float32', value=10)
if y.numpy()[0] < 10:
tmp = y * w
y = fluid.layers.relu(tmp)
y = paddle.nn.functional.relu(tmp)
if paddle.mean(y).numpy()[0] < batch_size:
y = paddle.abs(y)
else:
Expand Down Expand Up @@ -273,7 +273,7 @@ def forward(self, input):
# Create new var, but is not used.
x = 10
tmp = y * self.constant_vars['w']
y = fluid.layers.relu(tmp)
y = paddle.nn.functional.relu(tmp)
# Nested `if/else`
if y.numpy()[-1] < self.alpha:
# Modify variable of class
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ def build_program(self, dtype):
tmp_0 = self.feed_vars[0] * self.feed_vars[1]
tmp_1 = layers.mul(tmp_0, self.feed_vars[2])
# subgraph with 2 op nodes
tmp_2 = layers.relu(tmp_0 + tmp_1)
tmp_2 = paddle.nn.functional.relu(tmp_0 + tmp_1)

self.append_gradients(tmp_2)

Expand Down Expand Up @@ -146,7 +146,7 @@ def build_program(self, dtype):
tmp_3 = layers.mul(tmp_0, self.feed_vars[2])
# subgraph with 4 op nodes
tmp_3 = layers.cast(tmp_2, dtype="float16")
tmp_4 = layers.relu(tmp_1 + tmp_3)
tmp_4 = paddle.nn.functional.relu(tmp_1 + tmp_3)
tmp_5 = layers.cast(tmp_4, dtype=dtype)
tmp_3 = layers.cast(tmp_2, dtype=dtype)

Expand Down