Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
6004c25
Move api(lgamma) from legacy_api.yaml to api.yaml
Charles-hit Jul 15, 2022
68bb390
Move api(lgamma) from legacy_api.yaml to api.yaml
Charles-hit Jul 15, 2022
570b32f
Move api(lgamma) from legacy_api.yaml to api.yaml
Charles-hit Jul 15, 2022
b44c972
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Charles-hit Jul 15, 2022
f431458
modify code style
Charles-hit Jul 15, 2022
dfa61a6
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Charles-hit Jul 15, 2022
1f31eac
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Charles-hit Jul 18, 2022
ea24f06
add x to X mapping
Charles-hit Jul 18, 2022
904727a
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Charles-hit Jul 18, 2022
7a4e01a
add definition of lgamma
Charles-hit Jul 18, 2022
03cb1a9
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Charles-hit Jul 18, 2022
716f5d0
delete redundant lgamma definitions
Charles-hit Jul 19, 2022
5403032
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Charles-hit Jul 19, 2022
0609703
Modify code comments
Charles-hit Jul 19, 2022
3b1f527
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Charles-hit Jul 19, 2022
93b6f7c
Modify ops.py code format
Charles-hit Jul 26, 2022
ee41f42
add lgamma single test and lgamma api in fluid
Charles-hit Jul 27, 2022
b4154ab
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Charles-hit Jul 27, 2022
f9c622e
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Charles-hit Jul 27, 2022
1841138
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Charles-hit Jul 27, 2022
f0fbddd
Optimized lgamma unittest
Charles-hit Jul 27, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
94 changes: 0 additions & 94 deletions paddle/fluid/operators/lgamma_op.cc

This file was deleted.

9 changes: 9 additions & 0 deletions paddle/phi/api/yaml/api.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,15 @@
func : erf
backward : erf_grad

- api : lgamma
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : lgamma
backward : lgamma_grad

- api : mv
args : (Tensor x, Tensor vec)
output : Tensor
Expand Down
6 changes: 6 additions & 0 deletions paddle/phi/api/yaml/api_compat.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,12 @@
outputs :
out : Out

- api : lgamma
inputs :
x : X
outputs :
out : Out

- api : mv
inputs :
{x : X, vec : Vec}
Expand Down
10 changes: 10 additions & 0 deletions paddle/phi/api/yaml/backward.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,16 @@
func : erf_grad
data_type : out_grad

- backward_api : lgamma_grad
forward : lgamma(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : lgamma_grad

- backward_api : mv_grad
forward : mv (Tensor x, Tensor vec) -> Tensor(out)
args : (Tensor x, Tensor vec, Tensor out_grad)
Expand Down
9 changes: 0 additions & 9 deletions paddle/phi/api/yaml/legacy_api.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1263,15 +1263,6 @@
kernel :
func : less_than

- api : lgamma
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : lgamma
backward : lgamma_grad

- api : linspace
args : (Tensor start, Tensor stop, Tensor number, DataType dtype)
output : Tensor
Expand Down
10 changes: 0 additions & 10 deletions paddle/phi/api/yaml/legacy_backward.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1106,16 +1106,6 @@
kernel :
func : lerp_grad

- backward_api : lgamma_grad
forward : lgamma(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : lgamma_grad

- backward_api : log10_grad
forward : log10 (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
Expand Down
24 changes: 0 additions & 24 deletions paddle/phi/ops/compat/lgamma_sig.cc

This file was deleted.

68 changes: 32 additions & 36 deletions python/paddle/fluid/layers/ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from paddle.utils import deprecated
from paddle import _C_ops
import paddle

__deprecated_func_name__ = {
'tanh_shrink': 'tanhshrink',
Expand All @@ -37,28 +38,9 @@
]

__unary_func__ = [
'exp',
'expm1',
'atan',
'sqrt',
'rsqrt',
'abs',
'ceil',
'floor',
'cos',
'tan',
'acos',
'sin',
'sinh',
'asin',
'cosh',
'round',
'reciprocal',
'square',
'lgamma',
'acosh',
'asinh',
'atanh',
'exp', 'expm1', 'atan', 'sqrt', 'rsqrt', 'abs', 'ceil', 'floor', 'cos',
'tan', 'acos', 'sin', 'sinh', 'asin', 'cosh', 'round', 'reciprocal',
'square', 'acosh', 'asinh', 'atanh', 'lgamma'
]

__inplace_unary_func__ = [
Expand Down Expand Up @@ -480,20 +462,6 @@

""")

add_sample_code(
globals()["lgamma"], r"""
Examples:
.. code-block:: python

import paddle

x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.lgamma(x)
print(out)
# [1.31452441, 1.76149750, 2.25271273, 1.09579802]

""")

add_sample_code(
globals()["softplus"], r"""
Examples:
Expand Down Expand Up @@ -860,3 +828,31 @@ def erf(x, name=None):
print(out)
# [-0.42839236 -0.22270259 0.11246292 0.32862676]
"""


def lgamma(x, name=None):
    r"""
    Compute, element-wise, the natural logarithm of the absolute value of
    the gamma function of the input: :math:`out = log\Gamma(x)`.

    Args:
        x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the lgamma of the input Tensor, the shape and data type is the same with input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            out = paddle.lgamma(x)
            print(out)
            # [1.31452441, 1.76149750, 2.25271273, 1.09579802]
    """
    # Delegate to the Tensor method; dygraph/static dispatch happens there.
    result = paddle.Tensor.lgamma(x)
    return result
15 changes: 15 additions & 0 deletions python/paddle/fluid/tests/unittests/test_lgamma_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
import numpy as np
import paddle
from op_test import OpTest
from scipy import special

paddle.enable_static()

Expand Down Expand Up @@ -58,5 +59,19 @@ def test_check_grad_normal(self):
check_eager=True)


class TestLgammaOpApi(unittest.TestCase):
    """Check the Python-level lgamma API against scipy.special.gammaln."""

    def test_lgamma(self):
        paddle.disable_static()
        try:
            self.dtype = "float32"
            shape = (1, 4)
            # Shift samples into (1, 2) to stay clear of lgamma's poles at
            # non-positive integers.
            data = np.random.random(shape).astype(self.dtype) + 1
            data_ = paddle.to_tensor(data)
            out = paddle.fluid.layers.lgamma(data_)
            result = special.gammaln(data)
            # assert_allclose reports the offending elements on failure,
            # unlike assertTrue(np.allclose(...)); tolerances match
            # np.allclose defaults so pass/fail semantics are unchanged.
            np.testing.assert_allclose(result, out.numpy(), rtol=1e-05, atol=1e-08)
        finally:
            # Restore static mode even if the assertion fails, so later
            # tests in this process are not left in dygraph mode.
            paddle.enable_static()


if __name__ == "__main__":
unittest.main()
38 changes: 37 additions & 1 deletion python/paddle/tensor/math.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,6 @@
from .ops import sqrt # noqa: F401
from .ops import sqrt_ # noqa: F401
from .ops import sin # noqa: F401
from .ops import lgamma # noqa: F401
from .ops import asinh # noqa: F401
from .ops import acosh # noqa: F401
from .ops import atanh # noqa: F401
Expand Down Expand Up @@ -3713,6 +3712,43 @@ def digamma(x, name=None):
helper.append_op(type='digamma', inputs={'X': x}, outputs={'Out': out})
return out

def lgamma(x, name=None):
    r"""
    Compute, element-wise, the natural logarithm of the absolute value of
    the gamma function of the input: :math:`out = log\Gamma(x)`.

    Args:
        x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the lgamma of the input Tensor, the shape and data type is the same with input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            out = paddle.lgamma(x)
            print(out)
            # [1.31452441, 1.76149750, 2.25271273, 1.09579802]
    """
    # Dygraph fast paths: new final-state API first, then the legacy one.
    if in_dygraph_mode():
        return _C_ops.final_state_lgamma(x)
    if _in_legacy_dygraph():
        return _C_ops.lgamma(x)

    # Static graph: validate dtype, then append the op to the program.
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lgamma')
    layer_helper = LayerHelper('lgamma', **locals())
    out_var = layer_helper.create_variable_for_type_inference(x.dtype)
    layer_helper.append_op(type='lgamma',
                           inputs={'X': x},
                           outputs={'Out': out_var})
    return out_var


def neg(x, name=None):
"""
This function computes the negative of the Tensor elementwisely.
Expand Down
Loading