[Hackathon 5th No.49][pir] add logical compare method - Part 4 (Paddl…
gouzil authored Oct 30, 2023
1 parent ae10e7c commit e6b38f4
Showing 2 changed files with 277 additions and 14 deletions.
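In short, the commit monkey-patches Python's comparison operators onto pir `OpResult`, so static-graph code built under `paddle.pir_utils.IrGuard()` can use `<`, `<=`, `>`, `>=`, and `!=` directly (`__eq__` is left for later, per the TODO in the patch). A minimal usage sketch, assuming the standard `paddle.static` program/executor APIs behave as they do in the tests below (the `new_program()` helper used there is not shown in this diff):

import numpy as np
import paddle

paddle.enable_static()
with paddle.pir_utils.IrGuard():
    main = paddle.static.Program()
    startup = paddle.static.Program()
    with paddle.static.program_guard(main, startup):
        x = paddle.static.data(name="x", shape=[-1, 1], dtype="float32")
        y = paddle.static.data(name="y", shape=[-1, 1], dtype="float32")
        lt = x < y    # __lt__ -> paddle.tensor.less_than after this patch
        ge = x >= y   # __ge__ -> paddle.tensor.greater_equal
        ne = x != y   # __ne__ -> paddle.tensor.not_equal
    exe = paddle.static.Executor()
    lt_np, ge_np, ne_np = exe.run(
        main,
        feed={
            "x": np.array([[1.0], [3.0]], dtype="float32"),
            "y": np.array([[2.0], [3.0]], dtype="float32"),
        },
        fetch_list=[lt, ge, ne],
    )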
46 changes: 41 additions & 5 deletions python/paddle/pir/math_op_patch.py
@@ -261,14 +261,16 @@ def __impl__(self, other_var):
break
else:
# when break is not triggered, enter the else branch
- other_var_opresult = paddle.fill_constant(
-     self.shape,
-     lhs_dtype,
-     other_var,
+ other_var_opresult = (
+     paddle.tensor.creation.fill_constant(
+         self.shape,
+         lhs_dtype,
+         other_var,
+     )
else:
# add fill_op to current_block
- other_var_opresult = paddle.fill_constant(
+ other_var_opresult = paddle.tensor.creation.fill_constant(
[],
lhs_dtype,
other_var,
@@ -390,6 +392,34 @@ def __impl__(self, other_var):
'__matmul__',
_binary_creator_('__matmul__', paddle.tensor.matmul, False, None),
),
# for logical compare
# TODO(gouzil): Open after deleting c++ logic
# (
# '__eq__',
# _binary_creator_('__eq__', paddle.tensor.equal, False, None),
# ),
(
'__ne__',
_binary_creator_('__ne__', paddle.tensor.not_equal, False, None),
),
(
'__lt__',
_binary_creator_('__lt__', paddle.tensor.less_than, False, None),
),
(
'__le__',
_binary_creator_('__le__', paddle.tensor.less_equal, False, None),
),
(
'__gt__',
_binary_creator_('__gt__', paddle.tensor.greater_than, False, None),
),
(
'__ge__',
_binary_creator_(
'__ge__', paddle.tensor.greater_equal, False, None
),
),
]

global _already_patch_opresult
@@ -409,6 +439,12 @@ def __impl__(self, other_var):
if method_impl:
setattr(OpResult, method_name, method_impl)

# Bit operation symbol
for magic_method, origin_method in paddle.tensor.magic_method_func:
impl = getattr(paddle.tensor, origin_method, None)
if impl:
setattr(OpResult, magic_method, impl)

# Handling __getitem__
from ..base.variable_index import _getitem_static

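Both additions to math_op_patch.py follow the same monkey-patching pattern: build a list of (magic method name, wrapper) pairs, where each wrapper is produced by `_binary_creator_` around a `paddle.tensor` op, then attach them to `OpResult` with `setattr`; the new loop over `paddle.tensor.magic_method_func` does the same for the bitwise operators. A stripped-down, self-contained sketch of that pattern on a stand-in class (the class and helper below are illustrative only, not Paddle's actual internals):

import operator


class FakeOpResult:
    """Stand-in for pir.OpResult, used only to illustrate the patching pattern."""

    def __init__(self, value):
        self.value = value


def _binary_creator(method_name, op):
    # Build a method that applies `op` to the two operands and wraps the result.
    def __impl__(self, other):
        other_value = other.value if isinstance(other, FakeOpResult) else other
        return FakeOpResult(op(self.value, other_value))

    __impl__.__name__ = method_name
    return __impl__


# (magic method, underlying op) pairs, mirroring the list added above.
opresult_methods = [
    ("__ne__", _binary_creator("__ne__", operator.ne)),
    ("__lt__", _binary_creator("__lt__", operator.lt)),
    ("__le__", _binary_creator("__le__", operator.le)),
    ("__gt__", _binary_creator("__gt__", operator.gt)),
    ("__ge__", _binary_creator("__ge__", operator.ge)),
]
for method_name, method_impl in opresult_methods:
    setattr(FakeOpResult, method_name, method_impl)  # same move as the real patch

print((FakeOpResult(3) < FakeOpResult(5)).value)  # True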
245 changes: 236 additions & 9 deletions test/legacy_test/test_math_op_patch_pir.py
@@ -48,9 +48,8 @@ def test_pow(self):
y_np = np.random.random([10, 1024]).astype('float32')
res_np_b = x_np**y_np
res_np_c = paddle.pow(paddle.to_tensor(x_np), 2)
- # TODO(gouzil): solve paddle.fill_constant problem
- # res_np_d = x_np.__pow__(2)
- # res_np_e = x_np.__rpow__(2)
+ res_np_d = x_np.__pow__(2)
+ res_np_e = x_np.__rpow__(2)
paddle.enable_static()
# Calculate results under pir
with paddle.pir_utils.IrGuard():
@@ -64,19 +63,19 @@ def test_pow(self):
)
b = x**y
c = x.pow(2)
- # d = x.__pow__(2)
- # e = x.__rpow__(2)
+ d = x.__pow__(2)
+ e = x.__rpow__(2)
# TODO(gouzil): Why not use `paddle.static.default_main_program()`?
# Because different case do not isolate parameters (This is a known problem)
- (b_np, c_np) = exe.run(
+ (b_np, c_np, d_np, e_np) = exe.run(
main_program,
feed={"x": x_np, "y": y_np},
- fetch_list=[b, c],
+ fetch_list=[b, c, d, e],
)
np.testing.assert_allclose(res_np_b, b_np, rtol=1e-05)
np.testing.assert_allclose(res_np_c, c_np, rtol=1e-05)
- # np.testing.assert_allclose(res_np_d, d_np, rtol=1e-05)
- # np.testing.assert_allclose(res_np_e, e_np, rtol=1e-05)
+ np.testing.assert_allclose(res_np_d, d_np, rtol=1e-05)
+ np.testing.assert_allclose(res_np_e, e_np, rtol=1e-05)

def test_mod(self):
paddle.disable_static()
@@ -163,6 +162,234 @@ def test_floordiv(self):
np.testing.assert_allclose(res_np_c, c_np, atol=1e-05)
np.testing.assert_allclose(res_np_d, d_np, atol=1e-05)

def test_bitwise_not(self):
paddle.disable_static()
x_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
res_np_b = ~x_np
res_np_c = paddle.bitwise_not(paddle.to_tensor(x_np))
res_np_d = x_np.__invert__()
paddle.enable_static()
with paddle.pir_utils.IrGuard():
main_program, exe, program_guard = new_program()
with program_guard:
x = paddle.static.data(name='x', shape=[2, 3, 5], dtype='int32')
b = ~x
c = x.bitwise_not()
d = x.__invert__()
(b_np, c_np, d_np) = exe.run(
main_program,
feed={"x": x_np},
fetch_list=[b, c, d],
)
np.testing.assert_array_equal(res_np_b, b_np)
np.testing.assert_array_equal(res_np_c, c_np)
np.testing.assert_array_equal(res_np_d, d_np)

def test_bitwise_xor(self):
paddle.disable_static()
x_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
y_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
res_np_b = x_np ^ y_np
res_np_c = paddle.bitwise_xor(
paddle.to_tensor(x_np), paddle.to_tensor(y_np)
)
res_np_d = x_np.__xor__(y_np)
paddle.enable_static()
with paddle.pir_utils.IrGuard():
main_program, exe, program_guard = new_program()
with program_guard:
x = paddle.static.data(name="x", shape=[2, 3, 5], dtype="int32")
y = paddle.static.data(name="y", shape=[2, 3, 5], dtype="int32")
b = x ^ y
c = x.bitwise_xor(y)
d = x.__xor__(y)
(b_np, c_np, d_np) = exe.run(
main_program,
feed={"x": x_np, "y": y_np},
fetch_list=[b, c, d],
)
np.testing.assert_array_equal(res_np_b, b_np)
np.testing.assert_array_equal(res_np_c, c_np)
np.testing.assert_array_equal(res_np_d, d_np)

def test_bitwise_or(self):
paddle.disable_static()
x_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
y_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
res_np_b = x_np | y_np
res_np_c = paddle.bitwise_or(
paddle.to_tensor(x_np), paddle.to_tensor(y_np)
)
res_np_d = x_np.__or__(y_np)
paddle.enable_static()
with paddle.pir_utils.IrGuard():
main_program, exe, program_guard = new_program()
with program_guard:
x = paddle.static.data(name="x", shape=[2, 3, 5], dtype="int32")
y = paddle.static.data(name="y", shape=[2, 3, 5], dtype="int32")
b = x | y
c = x.bitwise_or(y)
d = x.__or__(y)
(b_np, c_np, d_np) = exe.run(
main_program,
feed={"x": x_np, "y": y_np},
fetch_list=[b, c, d],
)
np.testing.assert_array_equal(res_np_b, b_np)
np.testing.assert_array_equal(res_np_c, c_np)
np.testing.assert_array_equal(res_np_d, d_np)

def test_bitwise_and(self):
paddle.disable_static()
x_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
y_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
res_np_b = x_np & y_np
res_np_c = paddle.bitwise_and(
paddle.to_tensor(x_np), paddle.to_tensor(y_np)
)
res_np_d = x_np.__and__(y_np)
paddle.enable_static()
with paddle.pir_utils.IrGuard():
main_program, exe, program_guard = new_program()
with program_guard:
x = paddle.static.data(name="x", shape=[2, 3, 5], dtype="int32")
y = paddle.static.data(name="y", shape=[2, 3, 5], dtype="int32")
b = x & y
c = x.bitwise_and(y)
d = x.__and__(y)
(b_np, c_np, d_np) = exe.run(
main_program,
feed={"x": x_np, "y": y_np},
fetch_list=[b, c, d],
)
np.testing.assert_array_equal(res_np_b, b_np)
np.testing.assert_array_equal(res_np_c, c_np)
np.testing.assert_array_equal(res_np_d, d_np)

# for logical compare
def test_equal_and_nequal(self):
paddle.disable_static()
x_np = np.array([3, 4, 10, 14, 9, 18]).astype('float32')
y_np = np.array([3, 4, 11, 15, 8, 18]).astype('float32')
# TODO(gouzil): Open after deleting c++ logic
# res_np_b = x_np == y_np
# res_np_c = paddle.equal(paddle.to_tensor(x_np), paddle.to_tensor(y_np))
# res_np_d = x_np.__eq__(y_np)
res_np_e = x_np != y_np
res_np_f = paddle.not_equal(
paddle.to_tensor(x_np), paddle.to_tensor(y_np)
)
res_np_g = x_np.__ne__(y_np)
paddle.enable_static()
with paddle.pir_utils.IrGuard():
main_program, exe, program_guard = new_program()
with program_guard:
x = paddle.static.data(name="x", shape=[-1, 1], dtype='float32')
y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32')
# b = x == y
# c = x.equal(y)
# d = x.__eq__(y)
e = x != y
f = x.not_equal(y)
g = x.__ne__(y)
(e_np, f_np, g_np) = exe.run(
main_program,
feed={"x": x_np, "y": y_np},
fetch_list=[e, f, g],
)
# np.testing.assert_array_equal(res_np_b, b_np)
# np.testing.assert_array_equal(res_np_c, c_np)
# np.testing.assert_array_equal(res_np_d, d_np)
np.testing.assert_array_equal(res_np_e, e_np)
np.testing.assert_array_equal(res_np_f, f_np)
np.testing.assert_array_equal(res_np_g, g_np)

def test_less(self):
paddle.disable_static()
x_np = np.array([3, 4, 10, 14, 9, 18]).astype('float32')
y_np = np.array([3, 4, 11, 15, 8, 18]).astype('float32')
z_np = np.array([3, 4, 10, 14, 9, 18]).astype('float32')
res_np_b = x_np < y_np
res_np_c = paddle.less_than(
paddle.to_tensor(x_np), paddle.to_tensor(y_np)
)
res_np_d = x_np.__lt__(y_np)
res_np_e = x_np <= y_np
res_np_f = paddle.less_equal(
paddle.to_tensor(x_np), paddle.to_tensor(y_np)
)
res_np_g = x_np.__le__(y_np)
res_np_h = x_np <= z_np
paddle.enable_static()
with paddle.pir_utils.IrGuard():
main_program, exe, program_guard = new_program()
with program_guard:
x = paddle.static.data(name="x", shape=[-1, 1], dtype='float32')
y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32')
z = paddle.static.data(name="z", shape=[-1, 1], dtype='float32')
b = x < y
c = x.less_than(y)
d = x.__lt__(y)
e = x <= y
f = x.less_equal(y)
g = x.__le__(y)
h = x <= z
(b_np, c_np, d_np, e_np, f_np, g_np, h_np) = exe.run(
main_program,
feed={"x": x_np, "y": y_np, "z": z_np},
fetch_list=[b, c, d, e, f, g, h],
)
np.testing.assert_array_equal(res_np_b, b_np)
np.testing.assert_array_equal(res_np_c, c_np)
np.testing.assert_array_equal(res_np_d, d_np)
np.testing.assert_array_equal(res_np_e, e_np)
np.testing.assert_array_equal(res_np_f, f_np)
np.testing.assert_array_equal(res_np_g, g_np)
np.testing.assert_array_equal(res_np_h, h_np)

def test_greater(self):
paddle.disable_static()
x_np = np.array([3, 4, 10, 14, 9, 18]).astype('float32')
y_np = np.array([3, 4, 11, 15, 8, 18]).astype('float32')
z_np = np.array([3, 4, 10, 14, 9, 18]).astype('float32')
res_np_b = x_np > y_np
res_np_c = paddle.greater_than(
paddle.to_tensor(x_np), paddle.to_tensor(y_np)
)
res_np_d = x_np.__gt__(y_np)
res_np_e = x_np >= y_np
res_np_f = paddle.greater_equal(
paddle.to_tensor(x_np), paddle.to_tensor(y_np)
)
res_np_g = x_np.__ge__(y_np)
res_np_h = x_np >= z_np
paddle.enable_static()
with paddle.pir_utils.IrGuard():
main_program, exe, program_guard = new_program()
with program_guard:
x = paddle.static.data(name="x", shape=[-1, 1], dtype='float32')
y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32')
z = paddle.static.data(name="z", shape=[-1, 1], dtype='float32')
b = x > y
c = x.greater_than(y)
d = x.__gt__(y)
e = x >= y
f = x.greater_equal(y)
g = x.__ge__(y)
h = x >= z
(b_np, c_np, d_np, e_np, f_np, g_np, h_np) = exe.run(
main_program,
feed={"x": x_np, "y": y_np, "z": z_np},
fetch_list=[b, c, d, e, f, g, h],
)
np.testing.assert_array_equal(res_np_b, b_np)
np.testing.assert_array_equal(res_np_c, c_np)
np.testing.assert_array_equal(res_np_d, d_np)
np.testing.assert_array_equal(res_np_e, e_np)
np.testing.assert_array_equal(res_np_f, f_np)
np.testing.assert_array_equal(res_np_g, g_np)
np.testing.assert_array_equal(res_np_h, h_np)

def test_item(self):
with paddle.pir_utils.IrGuard():
x = paddle.static.data(name='x', shape=[3, 2, 1])
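A note on the new comparison tests: each one computes a NumPy/eager reference first, rebuilds the same expression under `paddle.pir_utils.IrGuard()`, and checks the fetched results with `np.testing.assert_array_equal`. The extra `z` operand in `test_less` and `test_greater` pins down the boundary case that `<=` and `>=` must return True where the operands are equal. The expected elementwise semantics, shown with NumPy alone:

import numpy as np

x = np.array([3, 4, 10, 14, 9, 18], dtype="float32")
z = x.copy()  # identical values, like z_np in the tests

print((x <= z).all())  # True: <= holds on equality
print((x < z).any())   # False: strict < never holds for equal values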
