
Commit 25da67a

merge develop, test=develop

2 parents: 8498f66 + 36868e8
39 files changed: +6430 -662 lines

paddle/fluid/pybind/CMakeLists.txt

Lines changed: 16 additions & 1 deletion
@@ -72,8 +72,23 @@ if(WITH_PYTHON)
   set(tmp_impl_file ${impl_file}.tmp)

   if(WIN32)
+    file(WRITE ${CMAKE_BINARY_DIR}/paddle/fluid/pybind/op_function_generator_retry.bat ""
+      "set build_times=1\n"
+      ":retry\n"
+      "ECHO op_function_generator run %build_times% time\n"
+      "${CMAKE_BINARY_DIR}/paddle/fluid/pybind/${CMAKE_BUILD_TYPE}/op_function_generator ${impl_file}\n"
+      "if %ERRORLEVEL% NEQ 0 (\n"
+      "  set /a build_times=%build_times%+1\n"
+      "  if %build_times% GTR 100 (\n"
+      "    exit /b 1\n"
+      "  ) else (\n"
+      "    goto :retry\n"
+      "  )\n"
+      ")\n"
+      "exit /b 0")
+
     add_custom_command(TARGET op_function_generator POST_BUILD
-      COMMAND ${CMAKE_BINARY_DIR}/paddle/fluid/pybind/${CMAKE_BUILD_TYPE}/op_function_generator ${impl_file}
+      COMMAND ${CMAKE_BINARY_DIR}/paddle/fluid/pybind/op_function_generator_retry.bat
     )

   if(${CBLAS_PROVIDER} STREQUAL MKLML)
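The generated op_function_generator_retry.bat simply re-runs op_function_generator until it exits with code 0, giving up after 100 attempts; presumably this works around intermittent failures of the generator on Windows CI. A minimal Python sketch of the same retry-with-cap pattern (the command line shown is illustrative, not part of the commit):

```python
import subprocess

def run_with_retry(cmd, max_attempts=100):
    """Re-run cmd until it exits 0, mirroring op_function_generator_retry.bat."""
    for attempt in range(1, max_attempts + 1):
        print("op_function_generator run %d time" % attempt)
        if subprocess.call(cmd) == 0:
            return 0  # success, stop retrying
    return 1  # every attempt failed, as `exit /b 1` does after 100 tries

# Hypothetical invocation; the real command line is assembled by CMake.
# run_with_retry(["op_function_generator", "op_function_impl.h"])
```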

paddle/fluid/pybind/op_function_generator.cc

Lines changed: 1 addition & 0 deletions
@@ -41,6 +41,7 @@ std::map<std::string, std::set<std::string>> op_ins_map = {
     {"fake_quantize_dequantize_moving_average_abs_max",
      {"X", "InScale", "InAccum", "InState"}},
     {"nll_loss", {"X", "Label", "Weight"}},
+    {"bilinear_tensor_product", {"X", "Y", "Weight", "Bias"}},
     {"gather", {"X", "Index", "Axis"}},
 };
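The op_ins_map table lists, for selected operators, the named inputs that the generated dygraph bindings should expose; the new entry declares that bilinear_tensor_product accepts X, Y, Weight and Bias. A rough sketch of the lookup, assuming the map is consulted before falling back to the op's registered inputs (the real table is C++):

```python
# Illustrative-only Python analogue; the real table is a C++ std::map.
op_ins_map = {
    "nll_loss": ["X", "Label", "Weight"],
    "bilinear_tensor_product": ["X", "Y", "Weight", "Bias"],  # new entry
    "gather": ["X", "Index", "Axis"],
}

def input_names(op_type, registered_inputs):
    # Hand-written list wins; otherwise fall back to the op's registered inputs.
    return op_ins_map.get(op_type, registered_inputs)

print(input_names("bilinear_tensor_product", ["X", "Y", "Weight"]))
```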

python/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -91,6 +91,7 @@ set(PADDLE_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/)
 if (WITH_TESTING)
     add_subdirectory(paddle/reader/tests)
     add_subdirectory(paddle/dataset/tests)
+    add_subdirectory(paddle/tests)
     add_subdirectory(paddle/fluid/tests)
     add_subdirectory(paddle/fluid/contrib/tests)
     add_subdirectory(paddle/fluid/contrib/slim/tests)

python/paddle/fluid/executor.py

Lines changed: 22 additions & 1 deletion
@@ -850,6 +850,7 @@ def close(self):

     def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name,
                       return_numpy, return_merged):
+        from paddle.optimizer.lr_scheduler import _LRScheduler
         exe = program._executor
         # TODO(zhenghuihuang): quantization uses Graph in CompiledProgram
         # instead of program. We will add support for checking Vars in Graph
@@ -893,6 +894,16 @@ def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name,
                 res.append(res_dict)
             exe.feed_tensors_into_local_scopes(res)

+        if hasattr(program._program, 'lr_sheduler'):
+            lr_sheduler = program._program.lr_sheduler
+            assert isinstance(lr_sheduler, _LRScheduler), "must be _LRScheduler"
+            lr_value = lr_sheduler()
+            lr_var = program._program.global_block().vars[lr_sheduler._var_name]
+            lr_tensor = _as_lodtensor(lr_value, core.CPUPlace(), lr_var.dtype)
+            exe.feed_and_split_tensor_into_local_scopes({
+                lr_sheduler._var_name: lr_tensor
+            })
+
         fetch_var_names = list(map(_to_name_str, fetch_list))
         tensors = exe.run(fetch_var_names, return_merged)._move_to_list()
         return as_numpy(tensors) if return_numpy else tensors
@@ -1222,7 +1233,7 @@ def _run_impl(self, program, feed, fetch_list, feed_var_name,

     def _run_program(self, program, feed, fetch_list, feed_var_name,
                      fetch_var_name, scope, return_numpy, use_program_cache):
-
+        from paddle.optimizer.lr_scheduler import _LRScheduler
         if feed is None:
             feed = {}
         elif isinstance(feed, (list, tuple)):
@@ -1278,6 +1289,16 @@ def _run_program(self, program, feed, fetch_list, feed_var_name,
                 fetch_var_name=fetch_var_name)

         self._feed_data(program, feed, feed_var_name, scope)
+        if hasattr(program, 'lr_sheduler'):
+            assert isinstance(program.lr_sheduler,
+                              _LRScheduler), "must be _LRScheduler"
+            lr_sheduler = program.lr_sheduler
+            lr_value = lr_sheduler()
+            lr_var = program.global_block().vars[lr_sheduler._var_name]
+            data = np.array([lr_value]).astype(convert_dtype(lr_var.dtype))
+            tensor = core.get_variable_tensor(scope, lr_sheduler._var_name)
+            tensor.set(data, self.place)
+
         if not use_program_cache:
             self._default_executor.run(program.desc, scope, 0, True, True,
                                        fetch_var_name)
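Both execution paths now look for a lr_sheduler attribute on the program, call the scheduler to get the current learning-rate value, and write that value into the scheduler's learning-rate variable before running, so a _LRScheduler attached by an optimizer is honored in static-graph execution. A hedged sketch of the idea with a stub scheduler (not Paddle's actual _LRScheduler API):

```python
import numpy as np

class StubLRScheduler:
    """Stand-in for paddle.optimizer.lr_scheduler._LRScheduler (illustrative only):
    callable, returns the learning rate for the current step."""

    def __init__(self, base_lr=0.1, gamma=0.9, step_num=0):
        self.base_lr, self.gamma, self.step_num = base_lr, gamma, step_num
        self._var_name = "learning_rate_0"  # name of the global LR variable

    def __call__(self):
        return self.base_lr * (self.gamma ** self.step_num)

def feed_scheduled_lr(scope_tensors, scheduler):
    # Mirrors the new executor logic: evaluate the scheduler and overwrite
    # the LR variable's tensor before the program is run.
    lr_value = scheduler()
    scope_tensors[scheduler._var_name] = np.array([lr_value], dtype="float32")

tensors = {}
feed_scheduled_lr(tensors, StubLRScheduler(step_num=2))
print(tensors["learning_rate_0"])  # [0.081]
```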

python/paddle/fluid/framework.py

Lines changed: 2 additions & 0 deletions
@@ -4450,6 +4450,8 @@ def network():
             p._current_role = self._current_role
             p.__op_role_var = self.__op_role_var
             p._appending_grad_times = self._appending_grad_times
+            if hasattr(self, 'lr_sheduler'):
+                p.lr_sheduler = self.lr_sheduler

             #NOTE(zhiqiu): we sync the cloned program, to update its program by
             # its desc.
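Program cloning (for example train_program.clone(for_test=True)) now carries the attached scheduler over to the clone, so the executor keeps feeding the scheduled learning rate when the cloned program is run. The guard, isolated as a sketch (attribute name spelled as in the commit):

```python
def copy_lr_scheduler(src_program, dst_program):
    # Only copy when an optimizer attached a scheduler to the source program.
    if hasattr(src_program, 'lr_sheduler'):
        dst_program.lr_sheduler = src_program.lr_sheduler
```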

python/paddle/fluid/layers/nn.py

File mode changed: 100644 → 100755
Lines changed: 12 additions & 3 deletions
@@ -12146,7 +12146,10 @@ def logical_and(x, y, out=None, name=None):
             res = paddle.logical_and(x, y)
             print(res.numpy()) # [True False False False]
     """
-
+    if x.shape != y.shape:
+        raise TypeError(
+            'Input tensors must be same shape, but received x \'s shape: %s, y \'s shape: %s '
+            % (x.shape, y.shape))
     return _logical_op(
         op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True)

@@ -12188,7 +12191,10 @@ def logical_or(x, y, out=None, name=None):
             res = paddle.logical_or(x, y)
             print(res.numpy()) # [True True True False]
     """
-
+    if x.shape != y.shape:
+        raise TypeError(
+            'Input tensors must be same shape, but received x \'s shape: %s, y \'s shape: %s '
+            % (x.shape, y.shape))
     return _logical_op(
         op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True)

@@ -12230,7 +12236,10 @@ def logical_xor(x, y, out=None, name=None):
             res = paddle.logical_xor(x, y)
             print(res.numpy()) # [False True True False]
     """
-
+    if x.shape != y.shape:
+        raise TypeError(
+            'Input tensors must be same shape, but received x \'s shape: %s, y \'s shape: %s '
+            % (x.shape, y.shape))
     return _logical_op(
         op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True)
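All three logical ops now reject operands of different shapes up front rather than failing later inside the operator; broadcasting is not attempted. A small illustration of the check on plain numpy shapes (values are arbitrary):

```python
import numpy as np

def check_same_shape(x, y):
    # Mirrors the new guard: operand shapes must match exactly.
    if x.shape != y.shape:
        raise TypeError(
            "Input tensors must be same shape, but received x's shape: %s, "
            "y's shape: %s" % (x.shape, y.shape))

a = np.array([True, False, True])
b = np.array([[True], [False]])
try:
    check_same_shape(a, b)
except TypeError as e:
    print(e)  # shapes (3,) and (2, 1) do not match
```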

python/paddle/fluid/optimizer.py

Lines changed: 40 additions & 7 deletions
@@ -68,14 +68,16 @@ def __init__(self,
                  regularization=None,
                  grad_clip=None,
                  name=None):
+        # Because of the loop import, so place it in the function body
+        from paddle.optimizer.lr_scheduler import _LRScheduler
         self._parameter_list = list(
             parameter_list) if parameter_list is not None else None
         self._name = name
         if framework.in_dygraph_mode():
-            if not isinstance(learning_rate, float) and \
-                    not isinstance(learning_rate, LearningRateDecay):
+            if not isinstance(learning_rate,
+                              (float, LearningRateDecay, _LRScheduler)):
                 raise TypeError(
-                    "learning rate should be float or LearningRateDecay, got %s here"
+                    "learning rate should be float or _LRScheduler, got %s here"
                     % type(learning_rate))
             if self._parameter_list is None:
                 raise AttributeError(
@@ -90,11 +92,11 @@ def __init__(self,
                         % regularization.__str__())
                     break
         else:
-            if not isinstance(learning_rate, float) and \
-                    not isinstance(learning_rate, framework.Variable):
+            if not isinstance(learning_rate,
+                              (float, framework.Variable, _LRScheduler)):
                 raise TypeError(
-                    "learning rate should be float or Variable, got %s here" %
-                    type(learning_rate))
+                    "learning rate should be float or _LRScheduler, got %s here"
+                    % type(learning_rate))

         if grad_clip is not None:
             if not isinstance(grad_clip, GradientClipBase):
@@ -144,11 +146,15 @@ def state_dict(self):
                 state_dict = adam.state_dict()

         '''
+        from paddle.optimizer.lr_scheduler import _LRScheduler
         state_dict = {}
         for k, v in self._accumulators.items():
             for para_name, var_tmp in v.items():
                 state_dict[var_tmp.name] = var_tmp
         # global step if use lr decay
+        if isinstance(self._learning_rate, _LRScheduler):
+            state_dict["LR_Scheduler"] = self._learning_rate.state_dict()
+            return state_dict
         if isinstance(self._learning_rate, LearningRateDecay):
             state_dict["LR_Scheduler"] = self._learning_rate.state_dict()

@@ -192,6 +198,9 @@ def set_dict(self, state_dict):
                 adam.set_dict(opti_state_dict)

         '''
+        from paddle.optimizer.lr_scheduler import _LRScheduler
+        if isinstance(self._learning_rate, _LRScheduler):
+            self._learning_rate.set_dict(state_dict["LR_Scheduler"])

         if isinstance(self._learning_rate, LearningRateDecay):
             self._learning_rate.set_dict(state_dict["LR_Scheduler"])
@@ -252,6 +261,30 @@ def get_opti_var_name_list(self):
         return self._opti_name_list

     def _create_global_learning_rate(self):
+        from paddle.optimizer.lr_scheduler import _LRScheduler
+        if isinstance(self._learning_rate, _LRScheduler):
+            lr_var = self._global_learning_rate()
+            # only create global lr_var once
+            if not isinstance(lr_var, framework.Variable):
+                lr_name = unique_name.generate('learning_rate')
+                self._learning_rate._var_name = lr_name
+                lr_var = self.helper.create_global_variable(
+                    name=lr_name,
+                    shape=[1],
+                    persistable=True,
+                    stop_gradient=True,
+                    dtype='float32' if self._dtype is None else self._dtype)
+                main_prog = framework.default_main_program()
+                main_prog.lr_sheduler = self._learning_rate
+                main_prog.lr_var = lr_var
+                self._learning_rate_map[framework.default_main_program(
+                )] = lr_var
+
+            lr_value = float(self._learning_rate())
+            self.helper.set_variable_initializer(
+                lr_var, initializer=Constant(value=lr_value))
+            return
+
         if imperative_base.enabled():
             # create learning rate Variable
             if isinstance(self._learning_rate, float):
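Taken together, the optimizer changes let a _LRScheduler be passed directly as learning_rate: the constructor's type checks accept it, _create_global_learning_rate creates a single persistable learning_rate variable and records the scheduler on the main program (main_prog.lr_sheduler), and state_dict/set_dict round-trip the scheduler state under the "LR_Scheduler" key. A hedged sketch of that round-trip with stub classes (not the real Paddle types):

```python
class StubScheduler:
    """Stand-in for _LRScheduler state handling (illustrative only)."""

    def __init__(self, last_epoch=0):
        self.last_epoch = last_epoch

    def state_dict(self):
        return {"last_epoch": self.last_epoch}

    def set_dict(self, state):
        self.last_epoch = state["last_epoch"]

class StubOptimizer:
    def __init__(self, learning_rate):
        self._learning_rate = learning_rate

    def state_dict(self):
        state = {}
        # Scheduler state is stored under the same "LR_Scheduler" key the commit uses.
        if isinstance(self._learning_rate, StubScheduler):
            state["LR_Scheduler"] = self._learning_rate.state_dict()
        return state

    def set_dict(self, state):
        if isinstance(self._learning_rate, StubScheduler):
            self._learning_rate.set_dict(state["LR_Scheduler"])

saved = StubOptimizer(StubScheduler(last_epoch=3)).state_dict()
restored = StubOptimizer(StubScheduler())
restored.set_dict(saved)
print(restored._learning_rate.last_epoch)  # 3
```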
Lines changed: 65 additions & 0 deletions
@@ -0,0 +1,65 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+from op_test import OpTest
+
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+import numpy as np
+
+
+class TestBilinearAPI(unittest.TestCase):
+    def test_api(self):
+        with fluid.program_guard(fluid.default_startup_program(),
+                                 fluid.default_main_program()):
+            if core.is_compiled_with_cuda():
+                place = core.CUDAPlace(0)
+            else:
+                place = core.CPUPlace()
+            exe = fluid.Executor(place)
+
+            data1 = fluid.data(name='X1', shape=[5, 5], dtype='float32')
+            data2 = fluid.data(name='X2', shape=[5, 4], dtype='float32')
+
+            layer1 = np.random.random((5, 5)).astype('float32')
+            layer2 = np.random.random((5, 4)).astype('float32')
+
+            bilinear = paddle.nn.Bilinear(
+                in1_features=5, in2_features=4, out_features=1000)
+            ret = bilinear(data1, data2)
+
+            exe.run(fluid.default_startup_program())
+            ret_fetch = exe.run(feed={'X1': layer1,
+                                      'X2': layer2},
+                                fetch_list=[ret.name])
+            self.assertEqual(ret_fetch[0].shape, (5, 1000))
+
+
+class TestBilinearAPIDygraph(unittest.TestCase):
+    def test_api(self):
+        paddle.disable_static()
+        layer1 = np.random.random((5, 5)).astype('float32')
+        layer2 = np.random.random((5, 4)).astype('float32')
+        bilinear = paddle.nn.Bilinear(
+            in1_features=5, in2_features=4, out_features=1000)
+        ret = bilinear(paddle.to_tensor(layer1), paddle.to_tensor(layer2))
+        self.assertEqual(ret.shape, [5, 1000])
+
+
+if __name__ == "__main__":
+    unittest.main()
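The new test exercises paddle.nn.Bilinear in both static and dygraph mode; for reference, bilinear_tensor_product computes out[k] = x1^T W[k] x2 + b[k] for each of the out_features weight slices. A numpy cross-check of one sample, with sizes matching the test and made-up weight/bias values:

```python
import numpy as np

in1, in2, out_features = 5, 4, 1000
x1 = np.random.random(in1).astype('float32')            # one row of X1
x2 = np.random.random(in2).astype('float32')            # one row of X2
W = np.random.random((out_features, in1, in2)).astype('float32')
b = np.random.random(out_features).astype('float32')

# out[k] = x1^T @ W[k] @ x2 + b[k]  (bilinear_tensor_product per output slice)
out = np.array([x1 @ W[k] @ x2 for k in range(out_features)]) + b
print(out.shape)  # (1000,) -> one sample of the (5, 1000) result in the test
```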

python/paddle/fluid/tests/unittests/test_layers.py

Lines changed: 27 additions & 0 deletions
@@ -3682,5 +3682,32 @@ def test_basic_gru(self):
                                     batch_first=batch_first)


+class TestMetricsDetectionMap(unittest.TestCase):
+    def test_detection_map(self):
+        program = fluid.Program()
+        with program_guard(program):
+            detect_res = fluid.layers.data(
+                name='detect_res',
+                shape=[10, 6],
+                append_batch_size=False,
+                dtype='float32')
+            label = fluid.layers.data(
+                name='label',
+                shape=[10, 1],
+                append_batch_size=False,
+                dtype='float32')
+            box = fluid.layers.data(
+                name='bbox',
+                shape=[10, 4],
+                append_batch_size=False,
+                dtype='float32')
+            map_eval = fluid.metrics.DetectionMAP(
+                detect_res, label, box, class_num=21)
+            cur_map, accm_map = map_eval.get_map_var()
+            self.assertIsNotNone(cur_map)
+            self.assertIsNotNone(accm_map)
+            print(str(program))
+
+
 if __name__ == '__main__':
     unittest.main()
