6 changes: 3 additions & 3 deletions python/paddle/framework/random.py
@@ -246,9 +246,9 @@ def _manual_program_seed(seed):
Returns:
None
"""
base.default_main_program().random_seed = seed
base.default_startup_program().random_seed = seed
program = base.Program()
paddle.static.default_main_program().random_seed = seed
paddle.static.default_startup_program().random_seed = seed
program = paddle.static.Program()
program.global_seed(seed)


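For illustration, a minimal sketch of what the rewritten seeding body does under static graph mode. The `_seed_programs` wrapper is hypothetical and only mirrors the updated lines above; it is not part of the PR:

import paddle

paddle.enable_static()


def _seed_programs(seed):
    # Mirror of the updated _manual_program_seed body: seed both default
    # programs through the public paddle.static entry points instead of
    # the internal base module.
    paddle.static.default_main_program().random_seed = seed
    paddle.static.default_startup_program().random_seed = seed
    program = paddle.static.Program()
    program.global_seed(seed)


_seed_programs(2023)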
22 changes: 19 additions & 3 deletions python/paddle/nn/initializer/uniform.py
@@ -12,11 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle import _C_ops
from paddle import _C_ops, pir

from ...base import core, framework, unique_name
from ...base.data_feeder import check_variable_and_dtype
from ...base.framework import _current_expected_place, in_dygraph_mode
from ...base.framework import (
_current_expected_place,
in_dygraph_mode,
in_pir_mode,
)
from .initializer import Initializer

__all__ = []
@@ -71,7 +75,7 @@ def forward(self, var, block=None):
"""
block = self._check_block(block)

assert isinstance(block, framework.Block)
assert isinstance(block, (framework.Block, pir.Block))
if not in_dygraph_mode():
check_variable_and_dtype(
var,
@@ -114,6 +118,18 @@ def forward(self, var, block=None):
else:
out_var._share_underline_tensor_to(var)
return None
elif in_pir_mode():
out_var = _C_ops.uniform(
var.shape,
out_dtype,
self._low,
self._high,
self._seed,
_current_expected_place(),
)
if var.dtype == core.DataType.FLOAT16:
return _C_ops.cast(out_var, var.dtype)
return out_var
else:
op = block.append_op(
type="uniform_random",
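The new `in_pir_mode()` branch builds the uniform tensor directly through `_C_ops.uniform` and casts back for FP16 variables. For context, a minimal usage sketch of the public initializer this serves, run in dygraph mode (layer sizes are illustrative):

import paddle

# Uniform initializer exercised through a Linear layer's weight_attr.
init = paddle.nn.initializer.Uniform(low=-0.5, high=0.5)
linear = paddle.nn.Linear(
    4, 3, weight_attr=paddle.ParamAttr(initializer=init)
)
print(linear.weight.shape)  # [4, 3], values drawn from U(-0.5, 0.5)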
2 changes: 1 addition & 1 deletion python/paddle/nn/layer/norm.py
@@ -532,7 +532,7 @@ def __init__(
)

def forward(self, input):
if in_dynamic_mode():
if in_dynamic_or_pir_mode():
return _C_ops.group_norm(
input,
self.weight,
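With `in_dynamic_or_pir_mode()`, `GroupNorm.forward` now takes the `_C_ops.group_norm` fast path under the new IR as well. A short dygraph usage sketch of the layer (shapes illustrative):

import numpy as np
import paddle

x = paddle.to_tensor(np.random.random((2, 4, 3, 3)).astype('float32'))
group_norm = paddle.nn.GroupNorm(num_groups=2, num_channels=4)
y = group_norm(x)  # normalizes over 2 groups of 2 channels each
print(y.shape)  # [2, 4, 3, 3]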
2 changes: 1 addition & 1 deletion python/paddle/tensor/ops.py
@@ -606,7 +606,7 @@ def cosh(x, name=None):
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[1.08107233, 1.02006674, 1.00500417, 1.04533851])
"""
if in_dynamic_mode():
if in_dynamic_or_pir_mode():
return _C_ops.cosh(x)
else:
check_variable_and_dtype(
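Same pattern for `paddle.cosh`: the `_C_ops.cosh` branch now also covers PIR. The docstring values shown in the hunk above correspond to this usage:

import paddle

x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.cosh(x)
print(out)
# [1.08107233, 1.02006674, 1.00500417, 1.04533851]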
27 changes: 17 additions & 10 deletions test/legacy_test/test_activation_op.py
@@ -1041,14 +1041,19 @@ def setUp(self):

self.convert_input_output()

def test_check_output(self):
self.check_output(check_pir=True)

def test_check_grad(self):
if self.dtype == np.float16:
return
if self.dtype == np.complex64 or self.dtype == np.complex128:
# Complex64 [CPU]: AssertionError: 0.006845869 not less than or equal to 0.005
self.check_grad(['X'], 'Out', max_relative_error=0.007)
self.check_grad(
['X'], 'Out', max_relative_error=0.007, check_pir=True
)
else:
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_pir=True)


class TestCosh_Complex64(TestCosh):
@@ -1075,13 +1080,16 @@ def test_dygraph(self):
z_expected = np.cosh(np_x)
np.testing.assert_allclose(z, z_expected, rtol=1e-05)

@test_with_pir_api
def test_api(self):
main = paddle.static.Program()
startup = paddle.static.Program()
with static_guard():
test_data_shape = [11, 17]
with base.program_guard(base.Program(), base.Program()):
input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
"float32"
)
input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
"float32"
)
with base.program_guard(main, startup):
data_x = paddle.static.data(
name="data_x",
shape=test_data_shape,
@@ -1090,9 +1098,8 @@ def test_api(self):

pd_cosh_out = paddle.cosh(data_x)
exe = base.Executor(place=base.CPUPlace())
exe.run(base.default_startup_program())
(np_cosh_res,) = exe.run(
base.default_main_program(),
main,
feed={"data_x": input_x},
fetch_list=[pd_cosh_out],
)
@@ -4820,7 +4827,7 @@ def test_check_grad(self):
)
create_test_act_fp16_class(TestCos, check_pir=True)
create_test_act_fp16_class(TestTan, check_pir=True)
create_test_act_fp16_class(TestCosh)
create_test_act_fp16_class(TestCosh, check_pir=True)
create_test_act_fp16_class(TestAcos, check_pir=True)
create_test_act_fp16_class(TestSin, check_pir=True)
create_test_act_fp16_class(TestSinh)
@@ -4981,7 +4988,7 @@ def test_check_grad(self):
)
create_test_act_bf16_class(TestCos, check_pir=True)
create_test_act_bf16_class(TestTan, check_pir=True)
create_test_act_bf16_class(TestCosh)
create_test_act_bf16_class(TestCosh, check_pir=True)
create_test_act_bf16_class(TestAcos, check_pir=True)
create_test_act_bf16_class(TestSin, check_pir=True)
create_test_act_bf16_class(TestSinh)
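These tests opt the cosh kernels into PIR checking via `check_pir=True` and the `@test_with_pir_api` decorator, which reruns a static-graph test under the new IR. A condensed, self-contained sketch of the decorated pattern; the decorator's import path is an assumption based on how other legacy tests use it:

import unittest

import numpy as np
import paddle
from paddle.pir_utils import test_with_pir_api  # import path assumed


class TestCoshStaticSketch(unittest.TestCase):
    @test_with_pir_api
    def test_api(self):
        paddle.enable_static()
        main = paddle.static.Program()
        startup = paddle.static.Program()
        x_np = np.random.uniform(0.1, 1, [11, 17]).astype('float32')
        with paddle.static.program_guard(main, startup):
            x = paddle.static.data('data_x', shape=[11, 17], dtype='float32')
            out = paddle.cosh(x)
        exe = paddle.static.Executor(paddle.CPUPlace())
        # cosh has no parameters, so running the startup program is optional.
        (res,) = exe.run(main, feed={'data_x': x_np}, fetch_list=[out])
        np.testing.assert_allclose(res, np.cosh(x_np), rtol=1e-05)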
32 changes: 20 additions & 12 deletions test/legacy_test/test_group_norm_op.py
@@ -119,7 +119,7 @@ def test_check_output(self):
inplace_atol = 0
place = core.CPUPlace()

self.check_output_with_place(place, atol=atol)
self.check_output_with_place(place, atol=atol, check_pir=True)

if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
@@ -132,7 +132,7 @@ def test_check_output(self):
# relative error is 1e-05 in numpy.allclose by default.
# Reference: https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html
self.check_output_with_place(
place, atol=atol, inplace_atol=inplace_atol
place, atol=atol, inplace_atol=inplace_atol, check_pir=True
)

def do_compare_between_place(self):
@@ -169,13 +169,13 @@ def test_check_grad(self):
return

place = core.CPUPlace()
self.check_grad_with_place(place, {'X', 'Scale', 'Bias'}, 'Y')
self.check_grad_with_place(
place, {'X', 'Scale', 'Bias'}, 'Y', check_pir=True
)
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place,
{'X', 'Scale', 'Bias'},
'Y',
place, {'X', 'Scale', 'Bias'}, 'Y', check_pir=True
)

def init_test_case(self):
@@ -201,14 +201,16 @@ def test_check_output(self):
# Set to inplace_atol to 0, which means the absolute error is 0, and the
# relative error is 1e-05 in numpy.allclose by default.
# Reference: https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html
self.check_output_with_place(place)
self.check_output_with_place(place, check_pir=True)

def test_check_grad(self):
if self.compare_between_place:
return

place = core.CUDAPlace(0)
self.check_grad_with_place(place, {'X', 'Scale', 'Bias'}, 'Y')
self.check_grad_with_place(
place, {'X', 'Scale', 'Bias'}, 'Y', check_pir=True
)

def init_test_case(self):
self.dtype = np.float16
@@ -266,14 +268,16 @@ def test_check_output(self):
# Set to inplace_atol to 0, which means the absolute error is 0, and the
# relative error is 1e-05 in numpy.allclose by default.
# Reference: https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html
self.check_output_with_place(place)
self.check_output_with_place(place, check_pir=True)

def test_check_grad(self):
if self.compare_between_place:
return

place = core.CUDAPlace(0)
self.check_grad_with_place(place, {'X', 'Scale', 'Bias'}, 'Y')
self.check_grad_with_place(
place, {'X', 'Scale', 'Bias'}, 'Y', check_pir=True
)

def init_test_case(self):
pass
@@ -366,7 +370,7 @@ def test_check_output(self):
inplace_atol = 2e-3
place = core.CUDAPlace(0)
self.check_output_with_place(
place, rtol=rtol, atol=atol, inplace_atol=inplace_atol
place,
rtol=rtol,
atol=atol,
inplace_atol=inplace_atol,
check_pir=True,
)


@@ -418,7 +426,7 @@ def setUp(self):
def test_check_output(self):
rtol = 2e-2
place = core.CUDAPlace(0)
self.check_output_with_place(place, rtol=rtol)
self.check_output_with_place(place, rtol=rtol, check_pir=True)


class TestGroupNormOpBigEps1_With_NHWC(TestGroupNormOp):
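The group_norm op tests above only thread `check_pir=True` through the existing output and gradient checks. For reference, a NumPy sketch of the forward computation those checks compare against (NCHW layout; a naive reference in the spirit of the test file's helper, not the operator implementation):

import numpy as np


def group_norm_naive(x, scale, bias, epsilon, groups):
    # x: (N, C, H, W); normalize each group of C // groups channels.
    n, c, h, w = x.shape
    g = x.reshape((n * groups, -1))
    mean = g.mean(axis=1, keepdims=True)
    var = g.var(axis=1, keepdims=True)
    g = (g - mean) / np.sqrt(var + epsilon)
    out = g.reshape((n, c, h, w))
    # Per-channel affine transform.
    return out * scale.reshape((1, c, 1, 1)) + bias.reshape((1, c, 1, 1))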
123 changes: 72 additions & 51 deletions test/legacy_test/test_layers.py
@@ -66,9 +66,9 @@ def get_static_graph_result(
self, feed, fetch_list, with_lod=False, force_to_use_cpu=False
):
exe = base.Executor(self._get_place(force_to_use_cpu))
exe.run(base.default_startup_program())
exe.run(paddle.static.default_startup_program())
return exe.run(
base.default_main_program(),
paddle.static.default_main_program(),
feed=feed,
fetch_list=fetch_list,
return_numpy=(not with_lod),
@@ -795,62 +795,83 @@ def test_group_norm(self):

shape = (2, 4, 3, 3)

input = np.random.random(shape).astype('float32')
def _test_static_specific(input):
with self.static_graph():
X = paddle.static.data(
name='X', shape=shape, dtype='float32', lod_level=1
)
ret = paddle.static.nn.group_norm(
input=X,
groups=2,
param_attr=paddle.nn.initializer.Uniform(
low=-0.5, high=0.5
),
bias_attr=paddle.nn.initializer.Constant(value=1),
)
static_ret = self.get_static_graph_result(
feed={
'X': base.create_lod_tensor(
data=input, recursive_seq_lens=[[1, 1]], place=place
)
},
fetch_list=[ret],
with_lod=True,
)[0]

with self.static_graph():
X = paddle.static.data(
name='X', shape=shape, dtype='float32', lod_level=1
)
ret = paddle.static.nn.group_norm(
input=X,
groups=2,
param_attr=paddle.nn.initializer.Uniform(low=-0.5, high=0.5),
bias_attr=paddle.nn.initializer.Constant(value=1),
)
static_ret = self.get_static_graph_result(
feed={
'X': base.create_lod_tensor(
data=input, recursive_seq_lens=[[1, 1]], place=place
)
},
fetch_list=[ret],
with_lod=True,
)[0]
return static_ret

with self.static_graph():
X = paddle.static.data(
name='X', shape=shape, dtype='float32', lod_level=1
)
groupNorm = paddle.nn.GroupNorm(
num_channels=shape[1],
num_groups=2,
weight_attr=paddle.nn.initializer.Uniform(low=-0.5, high=0.5),
bias_attr=paddle.nn.initializer.Constant(value=1),
)
ret = groupNorm(X)
static_ret2 = self.get_static_graph_result(
feed={
'X': base.create_lod_tensor(
data=input, recursive_seq_lens=[[1, 1]], place=place
)
},
fetch_list=[ret],
with_lod=True,
)[0]
def _test_static(input):
with self.static_graph():
X = paddle.static.data(
name='X', shape=shape, dtype='float32', lod_level=1
)
groupNorm = paddle.nn.GroupNorm(
num_channels=shape[1],
num_groups=2,
weight_attr=paddle.nn.initializer.Uniform(
low=-0.5, high=0.5
),
bias_attr=paddle.nn.initializer.Constant(value=1),
)
ret = groupNorm(X)
static_ret2 = self.get_static_graph_result(
feed={
'X': base.create_lod_tensor(
data=input, recursive_seq_lens=[[1, 1]], place=place
)
},
fetch_list=[ret, groupNorm.weight],
with_lod=True,
)[0]

with self.dynamic_graph():
groupNorm = paddle.nn.GroupNorm(
num_channels=shape[1],
num_groups=2,
weight_attr=paddle.nn.initializer.Uniform(low=-0.5, high=0.5),
bias_attr=paddle.nn.initializer.Constant(value=1),
)
dy_ret = groupNorm(to_variable(input))
dy_rlt_value = dy_ret.numpy()
return static_ret2

def _test_dygraph(input):
with self.dynamic_graph():
groupNorm = paddle.nn.GroupNorm(
num_channels=shape[1],
num_groups=2,
weight_attr=paddle.nn.initializer.Uniform(
low=-0.5, high=0.5
),
bias_attr=paddle.nn.initializer.Constant(value=1),
)
dy_ret = groupNorm(to_variable(input))
dy_rlt_value = dy_ret.numpy()
return dy_rlt_value

input = np.random.random(shape).astype('float32')
static_ret = _test_static_specific(input)
static_ret2 = _test_static(input)
dy_rlt_value = _test_dygraph(input)
np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)

with paddle.pir_utils.IrGuard():
static_ret_pir = _test_static(input)

np.testing.assert_allclose(static_ret2, static_ret_pir, rtol=1e-05)

def test_instance_norm(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
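The `test_group_norm` refactor above splits the original body into `_test_static_specific`, `_test_static`, and `_test_dygraph` helpers so the pure-`paddle.nn` static path can be rerun under `paddle.pir_utils.IrGuard()` and compared against the legacy-IR result. A standalone sketch of that comparison pattern, with constant initializers so both runs are deterministic (shapes and values illustrative):

import numpy as np
import paddle

paddle.enable_static()


def build_and_run(x_np):
    # Build a fresh GroupNorm graph and execute it once.
    main = paddle.static.Program()
    startup = paddle.static.Program()
    with paddle.static.program_guard(main, startup):
        x = paddle.static.data('x', shape=[2, 4, 3, 3], dtype='float32')
        gn = paddle.nn.GroupNorm(
            num_groups=2,
            num_channels=4,
            weight_attr=paddle.nn.initializer.Constant(1.0),
            bias_attr=paddle.nn.initializer.Constant(0.0),
        )
        y = gn(x)
    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(startup)
    return exe.run(main, feed={'x': x_np}, fetch_list=[y])[0]


x_np = np.random.random((2, 4, 3, 3)).astype('float32')
legacy_out = build_and_run(x_np)
with paddle.pir_utils.IrGuard():  # rerun the same builder under the new IR
    pir_out = build_and_run(x_np)
np.testing.assert_allclose(legacy_out, pir_out, rtol=1e-05)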