Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[API]Support static branch in paddle.to_tensor #45164

Merged
merged 48 commits into from
Aug 18, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
48 commits
Select commit Hold shift + click to select a range
94724ec
fix_shape
feifei-111 Aug 10, 2022
23ed6e1
code style
feifei-111 Aug 10, 2022
1a02a6e
fix_var_shape
feifei-111 Aug 10, 2022
6f65b26
Merge remote-tracking branch 'upstream/develop' into fix_var_shape
feifei-111 Aug 11, 2022
3f54345
fix assert
feifei-111 Aug 11, 2022
7877146
fix to_tensor badreturn
feifei-111 Aug 11, 2022
5505531
added annotate
feifei-111 Aug 11, 2022
40e60cb
del no need change
feifei-111 Aug 11, 2022
69081cc
fix code style
feifei-111 Aug 11, 2022
e089910
fix code style
feifei-111 Aug 11, 2022
6969a9e
fix dtype
feifei-111 Aug 12, 2022
86fc94c
fix to_tensor
feifei-111 Aug 12, 2022
4b7444b
add test
feifei-111 Aug 12, 2022
b39cc2a
modify test
feifei-111 Aug 12, 2022
f7af87c
fix code style
feifei-111 Aug 12, 2022
cf8a14b
fix codestyle
feifei-111 Aug 12, 2022
b382071
fix transformer
feifei-111 Aug 12, 2022
0dfe2ae
fix test
feifei-111 Aug 12, 2022
166441b
code style
feifei-111 Aug 12, 2022
e8ced58
resume assign
feifei-111 Aug 13, 2022
0af1bd2
fix input dtype not exist
feifei-111 Aug 13, 2022
8a7a4c0
fix complex type
feifei-111 Aug 13, 2022
9055a64
code style
feifei-111 Aug 13, 2022
967eb1c
fix tests
feifei-111 Aug 13, 2022
63762dd
fix transformer for variable
feifei-111 Aug 13, 2022
3f5e3ed
fix stynax
feifei-111 Aug 13, 2022
332766d
cancel to_variable
feifei-111 Aug 13, 2022
bb5045f
fix
feifei-111 Aug 13, 2022
3408d86
annotation
feifei-111 Aug 13, 2022
4641f1f
code style
feifei-111 Aug 13, 2022
5bb2a95
style
feifei-111 Aug 13, 2022
7f2209f
cs
feifei-111 Aug 13, 2022
792643e
fix place
feifei-111 Aug 15, 2022
472175c
dtype check
feifei-111 Aug 15, 2022
2c9480c
fix place
feifei-111 Aug 15, 2022
e41b58b
cast
feifei-111 Aug 16, 2022
9c6e928
code style
feifei-111 Aug 16, 2022
570a5d4
typewrite err
feifei-111 Aug 16, 2022
df3ed2c
fix cudaplace
feifei-111 Aug 16, 2022
ab67fe8
add attr check
feifei-111 Aug 16, 2022
6bf2990
fix type str
feifei-111 Aug 17, 2022
bfee8d6
sue convert dtype
feifei-111 Aug 17, 2022
17f17d3
fix stop gradient
feifei-111 Aug 17, 2022
c029f7b
bug fix
feifei-111 Aug 18, 2022
8f76b90
fix complex type
feifei-111 Aug 18, 2022
8b9d353
code style
feifei-111 Aug 18, 2022
9206a18
fix device
feifei-111 Aug 18, 2022
aa11ad4
1
feifei-111 Aug 18, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -129,9 +129,6 @@ def is_to_variable(node):
if utils.is_dygraph_api(node):
return api_name.endswith("to_variable")

if utils.is_paddle_api(node):
return api_name.endswith("to_tensor")

return False


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -556,6 +556,7 @@ def create_out(var_id):
var_base = core.eager.Tensor(var_desc.dtype(), var_desc.shape(),
var_desc.name(), var_desc.type(),
False)
var_base.stop_gradient = var.stop_gradient
out_varbase_map[var_desc.name()] = var_base
return var_base

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@

def dyfunc_generator():
for i in range(100):
yield paddle.to_tensor([i] * 10)
yield paddle.fluid.dygraph.to_variable([i] * 10)


def main_func():
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# The to_tensor api now creates one fewer op, so this test's expected op counts were updated
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

不用加这一行


from __future__ import print_function

import numpy
Expand Down Expand Up @@ -300,7 +302,7 @@ def init_test_func(self):
self.dygraph_func = dyfunc_tensor_shape_2

def _set_expected_op_num(self):
self.expected_op_num = 2
self.expected_op_num = 1
self.expected_shape_op_num = 0
self.expected_slice_op_num = 0

Expand Down Expand Up @@ -349,7 +351,7 @@ def init_test_func(self):
self.dygraph_func = dyfunc_tuple_shape_1

def _set_expected_op_num(self):
self.expected_op_num = 5
self.expected_op_num = 4
self.expected_shape_op_num = 1
self.expected_slice_op_num = 2

Expand All @@ -364,7 +366,7 @@ def init_test_func(self):
self.dygraph_func = dyfunc_tuple_shape_2

def _set_expected_op_num(self):
self.expected_op_num = 5
self.expected_op_num = 4
self.expected_shape_op_num = 1
self.expected_slice_op_num = 1

Expand All @@ -377,7 +379,7 @@ def init_test_func(self):
self.dygraph_func = dyfunc_tuple_shape_3

def _set_expected_op_num(self):
self.expected_op_num = 5
self.expected_op_num = 4
self.expected_shape_op_num = 1
self.expected_slice_op_num = 2

Expand All @@ -390,7 +392,7 @@ def init_test_func(self):
self.dygraph_func = dyfunc_paddle_shape_api

def _set_expected_op_num(self):
self.expected_op_num = 6
self.expected_op_num = 5
self.expected_shape_op_num = 2
self.expected_slice_op_num = 2

Expand Down Expand Up @@ -492,7 +494,7 @@ def init_test_func(self):
self.dygraph_func = dyfunc_with_while_4

def _set_expected_op_num(self):
self.expected_op_num = 5
self.expected_op_num = 4
self.expected_shape_op_num = 0
self.expected_slice_op_num = 0

Expand Down Expand Up @@ -556,7 +558,7 @@ def _set_test_func(self):
self.dygraph_func = dyfunc_tuple_shape_1

def _set_expected_op_num(self):
self.expected_op_num = 5
self.expected_op_num = 4
self.expected_shape_op_num = 1
self.expected_slice_op_num = 1

Expand Down Expand Up @@ -604,7 +606,7 @@ def init_test_func(self):
self.dygraph_func = dyfunc_change_shape_after_assign

def _set_expected_op_num(self):
self.expected_op_num = 6
self.expected_op_num = 5
self.expected_shape_op_num = 1
self.expected_slice_op_num = 1

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,124 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy
import paddle
import unittest
import os
import tempfile
import paddle.inference as paddle_infer
from paddle.fluid.framework import program_guard, Program
import numpy as np
from paddle.fluid import core


def case0(x):
    """Return a constant tensor built from a float list but cast to int64.

    The argument *x* is intentionally unused; the case only checks how
    to_tensor handles an explicit dtype under to_static.
    """
    return paddle.to_tensor([1.0, 2.0, 3.0], dtype="int64")


def case1(x):
    """Set the global default dtype to float64, then build a trainable tensor.

    The argument *x* is intentionally unused; the case checks that the
    default dtype and stop_gradient=False survive to_static conversion.
    """
    paddle.set_default_dtype("float64")
    return paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)


def case2(x):
    """Build an int64, trainable tensor on an explicitly chosen place.

    The argument *x* is intentionally unused; the case checks that an
    explicit place, dtype and stop_gradient all survive to_static.
    """
    # Prefer GPU placement when this Paddle build has CUDA support.
    target_place = (paddle.CUDAPlace(0)
                    if core.is_compiled_with_cuda() else paddle.CPUPlace())
    result = paddle.to_tensor(
        [1.0, 2.0, 3.0],
        place=target_place,
        dtype="int64",
        stop_gradient=False,
    )
    return result


def case3(x):
    """Combine a float64 default dtype with an explicitly chosen place.

    The argument *x* is intentionally unused; the case checks that the
    default dtype interacts correctly with an explicit place under
    to_static.
    """
    paddle.set_default_dtype("float64")
    # Prefer GPU placement when this Paddle build has CUDA support.
    target_place = (paddle.CUDAPlace(0)
                    if core.is_compiled_with_cuda() else paddle.CPUPlace())
    return paddle.to_tensor([1.0, 2.0, 3.0], place=target_place)


class TestToTensorReturnVal(unittest.TestCase):
    """Check that to_tensor behaves the same under to_static and dygraph."""

    def test_to_tensor_badreturn(self):
        """For each case function, the to_static-compiled call and the plain
        dygraph call must agree on dtype, stop_gradient, and place."""
        paddle.disable_static()
        x = paddle.to_tensor([3])

        # Run the cases in order: case1/case3 change the global default
        # dtype, so the sequence matters.
        for case_fn in (case0, case1, case2, case3):
            static_out = paddle.jit.to_static(case_fn)(x)
            dygraph_out = case_fn(x)
            self.assertTrue(static_out.dtype == dygraph_out.dtype)
            self.assertTrue(
                static_out.stop_gradient == dygraph_out.stop_gradient)
            self.assertTrue(static_out.place._equals(dygraph_out.place))


class TestStatic(unittest.TestCase):
    """Exercise paddle.to_tensor in static-graph mode end to end.

    Builds a program that feeds a to_tensor result (with explicit dtype,
    stop_gradient and place) into an fc layer, minimizes with SGD, and
    runs it once — verifying that to_tensor works as a static-branch op.
    """

    def test_static(self):
        paddle.enable_static()
        main_prog = Program()
        # NOTE: fixed local-variable typo (was `starup_prog`) and removed
        # scraped review-comment text that had been interleaved inside the
        # to_tensor call, which made the block syntactically invalid.
        startup_prog = Program()
        with program_guard(main_prog, startup_prog):
            # Prefer GPU placement when this Paddle build has CUDA support.
            if core.is_compiled_with_cuda():
                place = paddle.CUDAPlace(0)
            else:
                place = paddle.CPUPlace()

            # to_tensor in static mode must honor dtype, stop_gradient
            # and place just like the dygraph version.
            x = paddle.to_tensor(paddle.randn([5, 2]),
                                 dtype='float64',
                                 stop_gradient=False,
                                 place=place)

            out = paddle.static.nn.fc(x, 1)

            sgd = paddle.optimizer.SGD()
            sgd.minimize(paddle.mean(out))

            exe = paddle.static.Executor()
            exe.run(startup_prog)
            res = exe.run(fetch_list=[x, out])


# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
Loading