
add new API: paddle.clone; Tensor.element_size; nn.utils.parameters_to_vector #38020


Merged: 2 commits, Dec 23, 2021
2 changes: 2 additions & 0 deletions CMakeLists.txt
@@ -126,6 +126,8 @@ if(WIN32)
endforeach(flag_var)
endif()

# NOTE(zhouwei): MSVC's max/min macros conflict with std::min/max, so define NOMINMAX globally
add_definitions("-DNOMINMAX")
# Windows build: turn off warnings, use parallel compiling.
foreach(flag_var
CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
5 changes: 5 additions & 0 deletions paddle/fluid/framework/var_desc.cc
@@ -15,6 +15,7 @@ limitations under the License. */
#include "paddle/fluid/framework/var_desc.h"

#include "glog/logging.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
@@ -116,6 +117,10 @@ proto::VarType::Type VarDesc::GetDataType() const {
  return tensor_desc().data_type();
}

size_t VarDesc::ElementSize() const {
  return framework::SizeOfType(GetDataType());
}

std::vector<proto::VarType::Type> VarDesc::GetDataTypes() const {
  std::vector<proto::VarType::TensorDesc> descs = tensor_descs();
  std::vector<proto::VarType::Type> res;
2 changes: 2 additions & 0 deletions paddle/fluid/framework/var_desc.h
@@ -96,6 +96,8 @@ class VarDesc {

  proto::VarType::Type GetDataType() const;

  size_t ElementSize() const;

  std::vector<proto::VarType::Type> GetDataTypes() const;

  void SetLoDLevel(int32_t lod_level);
4 changes: 3 additions & 1 deletion paddle/fluid/imperative/layer.h
@@ -25,6 +25,7 @@
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/framework/var_type.h"
@@ -37,7 +38,6 @@
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/macros.h"
#include "paddle/pten/include/core.h"

namespace paddle {
namespace framework {
class Variable;
@@ -212,6 +212,8 @@ class VarBase {

  framework::proto::VarType::Type DataType() const { return var_->DataType(); }

  size_t ElementSize() const { return framework::SizeOfType(var_->DataType()); }

  void SetForwardDataType(framework::proto::VarType::Type data_type) {
    var_->SetForwardDataType(data_type);
  }
79 changes: 57 additions & 22 deletions paddle/fluid/pybind/imperative.cc
@@ -2013,35 +2013,70 @@ void BindImperative(py::module *m_ptr) {
auto *t = self->MutableVar()->GetMutable<framework::LoDTensor>();
return t->numel();
})
.def("element_size", &imperative::VarBase::ElementSize, R"DOC(
Returns the size in bytes of an element in the Tensor.

Examples:
.. code-block:: python

import paddle

x = paddle.to_tensor(1, dtype='bool')
x.element_size() # 1

x = paddle.to_tensor(1, dtype='float16')
x.element_size() # 2

x = paddle.to_tensor(1, dtype='float32')
x.element_size() # 4

x = paddle.to_tensor(1, dtype='float64')
x.element_size() # 8

x = paddle.to_tensor(1, dtype='complex128')
x.element_size() # 16
)DOC")
.def_property("name", &imperative::VarBase::Name,
&imperative::VarBase::SetName)
.def_property("stop_gradient",
&imperative::VarBase::OverridedStopGradient,
&imperative::VarBase::SetOverridedStopGradient)
.def_property("persistable", &imperative::VarBase::Persistable,
&imperative::VarBase::SetPersistable)
.def_property_readonly(
"shape",
[](imperative::VarBase &self) {
if (self.Var().IsType<framework::LoDTensor>()) {
return framework::vectorize<int>(
self.Var().Get<framework::LoDTensor>().dims());
} else if (self.Var().IsType<framework::SelectedRows>()) {
return framework::vectorize<int>(
self.Var().Get<framework::SelectedRows>().value().dims());
} else if (self.Var().IsType<framework::Strings>()) {
return std::vector<int>{static_cast<int>(
self.Var().Get<framework::Strings>().size())};
} else if (self.Var().IsType<framework::Vocab>()) {
return std::vector<int>{
static_cast<int>(self.Var().Get<framework::Vocab>().size())};
} else {
VLOG(2) << "It is meaningless to get shape of "
"variable type "
<< GetTypeName(self);
return std::vector<int>();
}
})
.def_property_readonly("shape",
[](imperative::VarBase &self) {
if (self.Var().IsType<framework::LoDTensor>()) {
return framework::vectorize<int>(
self.Var()
.Get<framework::LoDTensor>()
.dims());
} else if (self.Var()
.IsType<
framework::SelectedRows>()) {
return framework::vectorize<int>(
self.Var()
.Get<framework::SelectedRows>()
.value()
.dims());
} else if (self.Var()
.IsType<framework::Strings>()) {
return std::vector<int>{static_cast<int>(
self.Var()
.Get<framework::Strings>()
.size())};
} else if (self.Var()
.IsType<framework::Vocab>()) {
return std::vector<int>{static_cast<int>(
self.Var()
.Get<framework::Vocab>()
.size())};
} else {
VLOG(2) << "It is meaningless to get shape of "
"variable type "
<< GetTypeName(self);
return std::vector<int>();
}
})
.def_property_readonly("is_leaf", &imperative::VarBase::IsLeaf,
R"DOC(
Whether a Tensor is a leaf Tensor.
2 changes: 2 additions & 0 deletions paddle/fluid/pybind/protobuf.cc
@@ -179,6 +179,8 @@ void BindVarDsec(pybind11::module *m) {
pybind11::return_value_policy::reference)
.def("dtype", &pd::VarDesc::GetDataType,
pybind11::return_value_policy::reference)
.def("element_size", &pd::VarDesc::ElementSize,
pybind11::return_value_policy::reference)
.def("dtypes", &pd::VarDesc::GetDataTypes,
pybind11::return_value_policy::reference)
.def("lod_level", &pd::VarDesc::GetLoDLevel)
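The Python-side Variable.element_size() (added to framework.py below) delegates to this element_size binding on VarDesc. A quick static-graph check, as a minimal sketch using only APIs that appear in this PR:

import paddle

paddle.enable_static()

# element_size() reports the per-element width in bytes for the declared dtype
x = paddle.static.data(name='x', shape=[3, 2], dtype='float32')
print(x.element_size())  # 4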
2 changes: 2 additions & 0 deletions python/paddle/__init__.py
@@ -91,6 +91,7 @@
from .tensor.creation import empty_like # noqa: F401
from .tensor.creation import assign # noqa: F401
from .tensor.creation import complex # noqa: F401
from .tensor.creation import clone # noqa: F401
from .tensor.linalg import matmul # noqa: F401
from .tensor.linalg import dot # noqa: F401
from .tensor.linalg import norm # noqa: F401
@@ -587,4 +588,5 @@
    'fmin',
    'moveaxis',
    'repeat_interleave',
    'clone',
]
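For context, the newly exported paddle.clone is used like this in dygraph mode (mirroring the unit test added to test_assign_op.py below): it creates a copy of its input that still participates in autograd.

import paddle

x = paddle.ones([2])
x.stop_gradient = False

clone_x = paddle.clone(x)  # copy of x that still participates in autograd
y = clone_x ** 3
y.backward()

print(clone_x.grad.numpy())  # [3. 3.]
print(x.grad.numpy())        # [3. 3.] -- gradients flow back through the clone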
27 changes: 27 additions & 0 deletions python/paddle/fluid/framework.py
@@ -1396,6 +1396,33 @@ def to_string(self, throw_on_error, with_details=False):

    __repr__ = __str__

    def element_size(self):
        """
        Returns the size in bytes of an element in the Tensor.

        Examples:
            .. code-block:: python

                import paddle
                paddle.enable_static()

                x = paddle.static.data(name='x1', shape=[3, 2], dtype='bool')
                x.element_size() # 1

                x = paddle.static.data(name='x2', shape=[3, 2], dtype='int16')
                x.element_size() # 2

                x = paddle.static.data(name='x3', shape=[3, 2], dtype='float16')
                x.element_size() # 2

                x = paddle.static.data(name='x4', shape=[3, 2], dtype='float32')
                x.element_size() # 4

                x = paddle.static.data(name='x5', shape=[3, 2], dtype='float64')
                x.element_size() # 8
        """
        return self.desc.element_size()

    @property
    def stop_gradient(self):
        """
25 changes: 25 additions & 0 deletions python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -169,6 +169,31 @@ def test_assign_BasicTypes(self):
        self.assertTrue(np.allclose(result3.numpy(), np.array([1])))
        paddle.enable_static()

    def test_clone(self):
        paddle.disable_static()
        x = paddle.ones([2])
        x.stop_gradient = False
        clone_x = paddle.clone(x)

        y = clone_x**3
        y.backward()

        self.assertTrue(np.array_equal(x.numpy(), [1, 1]))
        self.assertTrue(np.array_equal(clone_x.grad.numpy(), [3, 3]))
        self.assertTrue(np.array_equal(x.grad.numpy(), [3, 3]))
        paddle.enable_static()

        with program_guard(Program(), Program()):
            x_np = np.random.randn(2, 3).astype('float32')
            x = paddle.static.data("X", shape=[2, 3])
            clone_x = paddle.clone(x)
            exe = paddle.static.Executor()
            y_np = exe.run(paddle.static.default_main_program(),
                           feed={'X': x_np},
                           fetch_list=[clone_x])[0]

            self.assertTrue(np.array_equal(y_np, x_np))


class TestAssignOpErrorApi(unittest.TestCase):
    def test_errors(self):
45 changes: 30 additions & 15 deletions python/paddle/fluid/tests/unittests/test_parameter.py
@@ -18,18 +18,19 @@
import copy
import paddle
from paddle.fluid.dygraph import guard
from paddle.fluid.framework import default_main_program
from paddle.fluid.framework import default_main_program, Variable
import paddle.fluid.core as core
from paddle.fluid.executor import Executor
import paddle.fluid.io as io
from paddle.fluid.initializer import ConstantInitializer
import numpy as np

paddle.enable_static()
main_program = default_main_program()


class ParameterChecks(unittest.TestCase):
    def check_parameter(self):
    def test_parameter(self):
        shape = [784, 100]
        val = 1.0625
        b = main_program.global_block()
@@ -43,13 +44,13 @@ def check_parameter(self):
        self.assertEqual((784, 100), param.shape)
        self.assertEqual(core.VarDesc.VarType.FP32, param.dtype)
        self.assertEqual(0, param.block.idx)
        exe = Executor(core.CPUPlace())
        exe = Executor(paddle.CPUPlace())
        p = exe.run(main_program, fetch_list=[param])[0]
        self.assertTrue(np.allclose(p, np.ones(shape) * val))
        self.assertTrue(np.array_equal(p, np.ones(shape) * val))
        p = io.get_parameter_value_by_name('fc.w', exe, main_program)
        self.assertTrue(np.allclose(np.array(p), np.ones(shape) * val))
        self.assertTrue(np.array_equal(p, np.ones(shape) * val))

    def check_parambase(self):
    def test_parambase(self):
        with guard():
            linear = paddle.nn.Linear(10, 10)
            param = linear.weight
@@ -71,7 +72,7 @@ def check_parambase(self):
            pram_copy2 = copy.deepcopy(param, memo)
            self.assertEqual(id(param_copy), id(pram_copy2))

    def check_exceptions(self):
    def test_exception(self):
        b = main_program.global_block()
        with self.assertRaises(ValueError):
            b.create_parameter(
@@ -86,16 +87,30 @@ def check_exceptions(self):
            b.create_parameter(
                name='test', shape=[-1], dtype='float32', initializer=None)

    def test_parambase_to_vector(self):
        with guard():
            initializer = paddle.ParamAttr(
                initializer=paddle.nn.initializer.Constant(3.))
            linear1 = paddle.nn.Linear(10, 15, initializer)


class TestParameter(ParameterChecks):
    def _test_parameter(self):
        self.check_parameter()

    def test_parambase(self):
        self.check_parambase()

            vec = paddle.nn.utils.parameters_to_vector(linear1.parameters())
            self.assertEqual(linear1.weight.shape, [10, 15])
            self.assertEqual(linear1.bias.shape, [15])
            self.assertTrue(isinstance(vec, Variable))
            self.assertEqual(vec.shape, [165])

    def test_exceptions(self):
        self.check_exceptions()

            linear2 = paddle.nn.Linear(10, 15)
            paddle.nn.utils.vector_to_parameters(vec, linear2.parameters())
            self.assertEqual(linear2.weight.shape, [10, 15])
            self.assertEqual(linear2.bias.shape, [15])
            self.assertTrue(
                np.array_equal(linear1.weight.numpy(), linear2.weight.numpy()))
            self.assertTrue(
                np.array_equal(linear1.bias.numpy(), linear2.bias.numpy()))
            self.assertTrue(linear2.weight.is_leaf)
            self.assertTrue(linear2.bias.is_leaf)
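The implementation of paddle.nn.utils.parameters_to_vector / vector_to_parameters is not part of this diff. What the test above relies on can be sketched as flatten-and-concat one way and split-and-copy-back the other; the _sketch helpers below are illustrative assumptions, not the shipped API, and use only paddle.concat, paddle.split, Tensor.flatten, Tensor.reshape, and Parameter.set_value:

import paddle

def parameters_to_vector_sketch(parameters):
    # Flatten each parameter and concatenate into a single 1-D tensor.
    return paddle.concat([p.flatten() for p in parameters], axis=0)

def vector_to_parameters_sketch(vec, parameters):
    # Split the flat vector by each parameter's element count,
    # then copy each chunk back into its parameter in place.
    numels = []
    for p in parameters:
        n = 1
        for d in p.shape:
            n *= d
        numels.append(n)
    for p, chunk in zip(parameters, paddle.split(vec, numels)):
        p.set_value(chunk.reshape(p.shape).numpy())

linear = paddle.nn.Linear(10, 15)
vec = parameters_to_vector_sketch(linear.parameters())
print(vec.shape)  # [165] == 10 * 15 + 15
vector_to_parameters_sketch(vec, linear.parameters())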


if __name__ == '__main__':
35 changes: 35 additions & 0 deletions python/paddle/fluid/tests/unittests/test_var_base.py
@@ -497,6 +497,41 @@ def test_to_string(self):
        var = fluid.dygraph.to_variable(self.array)
        self.assertTrue(isinstance(str(var), str))

    def test_element_size(self):
        with fluid.dygraph.guard():
            x = paddle.to_tensor(1, dtype='bool')
            self.assertEqual(x.element_size(), 1)

            x = paddle.to_tensor(1, dtype='float16')
            self.assertEqual(x.element_size(), 2)

            x = paddle.to_tensor(1, dtype='float32')
            self.assertEqual(x.element_size(), 4)

            x = paddle.to_tensor(1, dtype='float64')
            self.assertEqual(x.element_size(), 8)

            x = paddle.to_tensor(1, dtype='int8')
            self.assertEqual(x.element_size(), 1)

            x = paddle.to_tensor(1, dtype='int16')
            self.assertEqual(x.element_size(), 2)

            x = paddle.to_tensor(1, dtype='int32')
            self.assertEqual(x.element_size(), 4)

            x = paddle.to_tensor(1, dtype='int64')
            self.assertEqual(x.element_size(), 8)

            x = paddle.to_tensor(1, dtype='uint8')
            self.assertEqual(x.element_size(), 1)

            x = paddle.to_tensor(1, dtype='complex64')
            self.assertEqual(x.element_size(), 8)

            x = paddle.to_tensor(1, dtype='complex128')
            self.assertEqual(x.element_size(), 16)

    def test_backward(self):
        with fluid.dygraph.guard():
            var = fluid.dygraph.to_variable(self.array)