From 86726d559c06caf9cef41a9812f756a882ed3be5 Mon Sep 17 00:00:00 2001
From: llyyxx0413 <106639175+llyyxx0413@users.noreply.github.com>
Date: Wed, 18 Oct 2023 10:55:49 +0800
Subject: [PATCH] 【Hackathon No.8】add api hypot & hypot_ (#57295)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 python/paddle/__init__.py        |   4 ++
 python/paddle/tensor/__init__.py |   4 ++
 python/paddle/tensor/math.py     |  53 ++++++++++++++++
 test/legacy_test/test_hypot.py   | 104 +++++++++++++++++++++++++++++++
 test/legacy_test/test_inplace.py |  53 ++++++++++++++++
 5 files changed, 218 insertions(+)
 create mode 100644 test/legacy_test/test_hypot.py

diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index 5b3e806c3f947b..11a2d07d2096dd 100644
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -407,6 +407,8 @@
     i1e,
     polygamma,
     polygamma_,
+    hypot,
+    hypot_,
 )
 
 from .tensor.random import (
@@ -904,4 +906,6 @@
     'i1e',
     'polygamma',
     'polygamma_',
+    'hypot',
+    'hypot_',
 ]
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index 61005132276d91..ce4cfc8ee883ba 100644
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -329,6 +329,8 @@
 from .math import polygamma_  # noqa: F401
 from .math import renorm  # noqa: F401
 from .math import renorm_  # noqa: F401
+from .math import hypot  # noqa: F401
+from .math import hypot_  # noqa: F401
 
 from .random import multinomial  # noqa: F401
 from .random import standard_normal  # noqa: F401
@@ -464,6 +466,8 @@
     'sum',
     'nan_to_num',
     'nan_to_num_',
+    'hypot',
+    'hypot_',
     'nansum',
     'nanmean',
     'count_nonzero',
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 3c5e9f3e34d364..acdd3a35b57d08 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -6932,3 +6932,56 @@ def ldexp_(x, y, name=None):
     y = paddle.cast(y, dtype=out_dtype)
     two = paddle.to_tensor(2, dtype=out_dtype)
     return paddle.multiply_(x, paddle.pow(two, y))
+
+
+def hypot(x, y, name=None):
+    """
+    Calculate the length of the hypotenuse of a right-angle triangle. The equation is:
+
+    .. math::
+        out = {\\sqrt{x^2 + y^2}}
+
+    Args:
+        x (Tensor): The input Tensor, the data type is float32, float64, int32 or int64.
+        y (Tensor): The input Tensor, the data type is float32, float64, int32 or int64.
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        out (Tensor): An N-D Tensor. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y. And the data type is float32 or float64.
+
+    Examples:
+
+        .. code-block:: python
+
+            >>> import paddle
+
+            >>> x = paddle.to_tensor([3], dtype='float32')
+            >>> y = paddle.to_tensor([4], dtype='float32')
+            >>> res = paddle.hypot(x, y)
+            >>> print(res)
+            Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [5.])
+
+    """
+    if not isinstance(x, (paddle.Tensor, Variable)):
+        raise TypeError(f"x must be tensor type, but got {type(x)}")
+    if not isinstance(y, (paddle.Tensor, Variable)):
+        raise TypeError(f"y must be tensor type, but got {type(y)}")
+
+    out = (paddle.pow(x, 2) + paddle.pow(y, 2)).sqrt()
+    return out
+
+
+@inplace_apis_in_dygraph_only
+def hypot_(x, y, name=None):
+    r"""
+    Inplace version of ``hypot`` API, the output Tensor will be inplaced with input ``x``.
+    Please refer to :ref:`api_paddle_hypot`.
+    """
+    if not isinstance(x, (paddle.Tensor, Variable)):
+        raise TypeError(f"x must be tensor type, but got {type(x)}")
+    if not isinstance(y, (paddle.Tensor, Variable)):
+        raise TypeError(f"y must be tensor type, but got {type(y)}")
+
+    out = x.pow_(2).add_(y.pow(2)).sqrt_()
+    return out
diff --git a/test/legacy_test/test_hypot.py b/test/legacy_test/test_hypot.py
new file mode 100644
index 00000000000000..66a049038eb5ae
--- /dev/null
+++ b/test/legacy_test/test_hypot.py
@@ -0,0 +1,104 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+
+import paddle
+from paddle import base
+from paddle.base import core
+
+paddle.enable_static()
+
+
+class TestHypotAPI(unittest.TestCase):
+    def setUp(self):
+        self.x_shape = [10, 10]
+        self.y_shape = [10, 1]
+        self.x_np = np.random.uniform(-10, 10, self.x_shape).astype(np.float32)
+        self.y_np = np.random.uniform(-10, 10, self.y_shape).astype(np.float32)
+
+    def test_static_graph(self):
+        paddle.enable_static()
+        startup_program = base.Program()
+        train_program = base.Program()
+        with base.program_guard(startup_program, train_program):
+            x = paddle.static.data(
+                name='input1', dtype='float32', shape=self.x_shape
+            )
+            y = paddle.static.data(
+                name='input2', dtype='float32', shape=self.y_shape
+            )
+            out = paddle.hypot(x, y)
+
+            place = (
+                base.CUDAPlace(0)
+                if core.is_compiled_with_cuda()
+                else base.CPUPlace()
+            )
+            exe = base.Executor(place)
+            res = exe.run(
+                base.default_main_program(),
+                feed={'input1': self.x_np, 'input2': self.y_np},
+                fetch_list=[out],
+            )
+            np_out = np.hypot(self.x_np, self.y_np)
+            np.testing.assert_allclose(res[0], np_out, atol=1e-5, rtol=1e-5)
+            paddle.disable_static()
+
+    def test_dygraph(self):
+        paddle.disable_static()
+        x = paddle.to_tensor(self.x_np)
+        y = paddle.to_tensor(self.y_np)
+        result = paddle.hypot(x, y)
+        np.testing.assert_allclose(
+            np.hypot(self.x_np, self.y_np), result.numpy(), rtol=1e-05
+        )
+
+        paddle.enable_static()
+
+    def test_error(self):
+        x = paddle.to_tensor(self.x_np)
+        y = 3.8
+        self.assertRaises(TypeError, paddle.hypot, x, y)
+        self.assertRaises(TypeError, paddle.hypot, y, x)
+
+
+class TestHypotAPIBroadCast(TestHypotAPI):
+    def setUp(self):
+        self.x_np = np.arange(6).astype(np.float32)
+        self.y_np = np.array([20]).astype(np.float32)
+        self.x_shape = [6]
+        self.y_shape = [1]
+
+
+class TestHypotAPI3(TestHypotAPI):
+    def setUp(self):
+        self.x_shape = []
+        self.y_shape = []
+        self.x_np = np.random.uniform(-10, 10, self.x_shape).astype(np.float32)
+        self.y_np = np.random.uniform(-10, 10, self.y_shape).astype(np.float32)
+
+
+class TestHypotAPI4(TestHypotAPI):
+    def setUp(self):
+        self.x_shape = [1]
+        self.y_shape = [1]
+        self.x_np = np.random.uniform(-10, 10, self.x_shape).astype(np.float32)
+        self.y_np = np.random.uniform(-10, 10, self.y_shape).astype(np.float32)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/legacy_test/test_inplace.py b/test/legacy_test/test_inplace.py
index e3f1de1048e113..5be252c3779546 100644
--- a/test/legacy_test/test_inplace.py
+++ b/test/legacy_test/test_inplace.py
@@ -834,6 +834,59 @@ def test_error(self):
         self.assertRaises(ValueError, paddle.gcd_, x, y)
 
 
+class TestDygraphInplaceHypot(TestDygraphInplace):
+    def init_data(self):
+        self.input_var_numpy = np.random.randint(2, size=200)
+        self.input_var_numpy = self.input_var_numpy.reshape([10, 20])
+        self.dtype = "float32"
+        self.y = paddle.randn(shape=[10, 20], dtype="float32")
+
+    def inplace_api_processing(self, var):
+        return paddle.hypot_(var, self.y)
+
+    def non_inplace_api_processing(self, var):
+        return paddle.hypot(var, self.y)
+
+    def test_errors(self):
+        x = 3.0
+        self.assertRaises(TypeError, paddle.hypot_, x, self.y)
+        self.assertRaises(TypeError, paddle.hypot_, self.y, x)
+
+    def test_forward_version(self):
+        with paddle.base.dygraph.guard():
+            var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
+            self.assertEqual(var.inplace_version, 0)
+
+            inplace_var = self.inplace_api_processing(var)
+            self.assertEqual(var.inplace_version, 3)
+
+            inplace_var[0] = 2.0
+            self.assertEqual(var.inplace_version, 4)
+
+            inplace_var = self.inplace_api_processing(inplace_var)
+            self.assertEqual(var.inplace_version, 7)
+
+    def test_backward_error(self):
+        # It raises an error because the inplace operator will result
+        # in incorrect gradient computation.
+        with paddle.base.dygraph.guard():
+            var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
+            var_a.stop_gradient = False
+
+            var_b = var_a**2
+            # Here, the gradient computation will use the value of var_b
+            var_c = var_b**2
+            self.inplace_api_processing(var_b)
+            var_c = paddle.cast(var_c, "float32")
+
+            loss = paddle.nn.functional.relu(var_c)
+            with self.assertRaisesRegex(
+                RuntimeError,
+                f"received tensor_version:{3} != wrapper_version_snapshot:{0}",
+            ):
+                loss.backward()
+
+
 class TestDygraphInplaceNanToNum(TestDygraphInplace):
     def init_data(self):
         self.input_var_numpy = np.array(
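
A minimal usage sketch for the two APIs added by this patch, assuming a Paddle build that already includes the change; it is not part of the patch itself. The values mirror the docstring example, and the broadcasting and in-place behaviour follow what TestHypotAPIBroadCast and TestDygraphInplaceHypot exercise above.

    import numpy as np
    import paddle

    # Out-of-place: hypot broadcasts x and y elementwise, like other binary math ops.
    x = paddle.to_tensor(np.arange(6).astype("float32"))  # shape [6]
    y = paddle.to_tensor([20.0])                          # shape [1], broadcasts against x
    res = paddle.hypot(x, y)
    np.testing.assert_allclose(res.numpy(), np.hypot(np.arange(6), 20.0), rtol=1e-5)

    # In-place: hypot_ overwrites x. Because it chains pow_, add_ and sqrt_, a single
    # call advances x.inplace_version by 3, which is what test_forward_version asserts.
    x = paddle.to_tensor([3.0])
    y = paddle.to_tensor([4.0])
    paddle.hypot_(x, y)
    print(x)                  # the hypotenuse, [5.]
    print(x.inplace_version)  # 3

Note that ``hypot_`` is registered through ``inplace_apis_in_dygraph_only``, so it is only available in dynamic graph mode.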