248 changes: 179 additions & 69 deletions python/paddle/fluid/tests/unittests/test_bce_loss.py
@@ -19,94 +19,204 @@
from op_test import OpTest


def test_static_layer(place,
input_np,
label_np,
reduction='mean',
weight_np=None):
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.data(name='input', shape=input_np.shape, dtype='float64')
label = paddle.data(name='label', shape=label_np.shape, dtype='float64')
if weight_np is not None:
weight = paddle.data(
name='weight', shape=weight_np.shape, dtype='float64')
bce_loss = paddle.nn.loss.BCELoss(
weight=weight, reduction=reduction)
else:
bce_loss = paddle.nn.loss.BCELoss(reduction=reduction)
res = bce_loss(input, label)
exe = paddle.static.Executor(place)
static_result = exe.run(prog,
feed={"input": input_np,
"label": label_np}
if weight_np is None else {
"input": input_np,
"label": label_np,
"weight": weight_np
},
fetch_list=[res])
return static_result


def test_static_functional(place,
input_np,
label_np,
reduction='mean',
weight_np=None):
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.data(name='input', shape=input_np.shape, dtype='float64')
label = paddle.data(name='label', shape=label_np.shape, dtype='float64')
if weight_np is not None:
weight = paddle.data(
name='weight', shape=weight_np.shape, dtype='float64')
res = paddle.nn.functional.binary_cross_entropy(
input, label, weight=weight, reduction=reduction)
else:
res = paddle.nn.functional.binary_cross_entropy(
input, label, reduction=reduction)
exe = paddle.static.Executor(place)
static_result = exe.run(prog,
feed={"input": input_np,
"label": label_np}
if weight_np is None else {
"input": input_np,
"label": label_np,
"weight": weight_np
},
fetch_list=[res])
return static_result


def test_dygraph_layer(place,
input_np,
label_np,
reduction='mean',
weight_np=None):
paddle.disable_static()
if weight_np is not None:
weight = paddle.to_tensor(weight_np)
bce_loss = paddle.nn.loss.BCELoss(weight=weight, reduction=reduction)
else:
bce_loss = paddle.nn.loss.BCELoss(reduction=reduction)
dy_res = bce_loss(paddle.to_tensor(input_np), paddle.to_tensor(label_np))
dy_result = dy_res.numpy()
paddle.enable_static()
return dy_result


def test_dygraph_functional(place,
input_np,
label_np,
reduction='mean',
weight_np=None):
paddle.disable_static()
input = paddle.to_tensor(input_np)
label = paddle.to_tensor(label_np)

if weight_np is not None:
weight = paddle.to_tensor(weight_np)
dy_res = paddle.nn.functional.binary_cross_entropy(
input, label, weight=weight, reduction=reduction)
else:
dy_res = paddle.nn.functional.binary_cross_entropy(
input, label, reduction=reduction)
dy_result = dy_res.numpy()
paddle.enable_static()
return dy_result


def calc_bceloss(input_np, label_np, reduction='mean', weight_np=None):
if weight_np is None:
expected = -1 * (label_np * np.log(input_np) +
(1. - label_np) * np.log(1. - input_np))
else:
expected = -1 * weight_np * (label_np * np.log(input_np) +
(1. - label_np) * np.log(1. - input_np))

if reduction == 'mean':
expected = np.mean(expected)
elif reduction == 'sum':
expected = np.sum(expected)
else:
expected = expected

return expected


class TestBCELoss(unittest.TestCase):
def test_BCELoss(self):
input_np = np.random.random(size=(20, 30)).astype(np.float64)
label_np = np.random.random(size=(20, 30)).astype(np.float64)
prog = fluid.Program()
startup_prog = fluid.Program()
input_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64)
label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float64)
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
reductions = ['sum', 'mean', 'none']
for place in places:
for red in reductions:
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input', shape=[None, 30], dtype='float64')
label = fluid.data(
name='label', shape=[None, 30], dtype='float64')
bce_loss = paddle.nn.loss.BCELoss(reduction=red)
res = bce_loss(input, label)

exe = fluid.Executor(place)
static_result = exe.run(
prog,
feed={"input": input_np,
"label": label_np},
fetch_list=[res])

with fluid.dygraph.guard():
bce_loss = paddle.nn.loss.BCELoss(reduction=red)
dy_res = bce_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_result = dy_res.numpy()

expected = -1 * (label_np * np.log(input_np) +
(1. - label_np) * np.log(1. - input_np))
if red == 'mean':
expected = np.mean(expected)
elif red == 'sum':
expected = np.sum(expected)
else:
expected = expected
for reduction in reductions:
static_result = test_static_layer(place, input_np, label_np,
reduction)
dy_result = test_dygraph_layer(place, input_np, label_np,
reduction)
expected = calc_bceloss(input_np, label_np, reduction)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
static_functional = test_static_functional(place, input_np,
label_np, reduction)
dy_functional = test_dygraph_functional(place, input_np,
label_np, reduction)
self.assertTrue(np.allclose(static_functional, expected))
self.assertTrue(np.allclose(static_functional, dy_functional))
self.assertTrue(np.allclose(dy_functional, expected))

def test_BCELoss_weight(self):
input_np = np.random.random(size=(2, 3, 4, 10)).astype(np.float64)
label_np = np.random.random(size=(2, 3, 4, 10)).astype(np.float64)
input_np = np.random.uniform(
0.1, 0.8, size=(2, 3, 4, 10)).astype(np.float64)
label_np = np.random.randint(
0, 2, size=(2, 3, 4, 10)).astype(np.float64)
weight_np = np.random.random(size=(3, 4, 10)).astype(np.float64)
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input', shape=[None, 3, 4, 10], dtype='float64')
label = fluid.data(
name='label', shape=[None, 3, 4, 10], dtype='float64')
weight = fluid.data(
name='weight', shape=[3, 4, 10], dtype='float64')
bce_loss = paddle.nn.loss.BCELoss(weight=weight)
res = bce_loss(input, label)

exe = fluid.Executor(place)
static_result = exe.run(prog,
feed={
"input": input_np,
"label": label_np,
"weight": weight_np
},
fetch_list=[res])

with fluid.dygraph.guard():
bce_loss = paddle.nn.loss.BCELoss(
weight=fluid.dygraph.to_variable(weight_np))
dy_res = bce_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_result = dy_res.numpy()

expected = np.mean(-1 * weight_np *
(label_np * np.log(input_np) +
(1. - label_np) * np.log(1. - input_np)))
for reduction in ['sum', 'mean', 'none']:
static_result = test_static_layer(
place, input_np, label_np, reduction, weight_np=weight_np)
dy_result = test_dygraph_layer(
place, input_np, label_np, reduction, weight_np=weight_np)
expected = calc_bceloss(
input_np, label_np, reduction, weight_np=weight_np)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
static_functional = test_static_functional(
place, input_np, label_np, reduction, weight_np=weight_np)
dy_functional = test_dygraph_functional(
place, input_np, label_np, reduction, weight_np=weight_np)
self.assertTrue(np.allclose(static_functional, expected))
self.assertTrue(np.allclose(static_functional, dy_functional))
self.assertTrue(np.allclose(dy_functional, expected))

def test_BCELoss_boardcast(self):
input_np = np.random.uniform(
0.1, 0.8, size=(2, 3, 4, 10)).astype(np.float64)
label_np = np.random.randint(0, 2, size=(3, 4, 10)).astype(np.float64)
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()

static_result = test_static_layer(place, input_np, label_np)
dy_result = test_dygraph_layer(place, input_np, label_np)
expected = calc_bceloss(input_np, label_np)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))

def test_BCELoss_error(self):
paddle.disable_static()
self.assertRaises(
ValueError, paddle.nn.loss.BCELoss, reduction="unsupport reduction")
input = paddle.to_tensor([[0.1, 0.3]], dtype='float32')
label = paddle.to_tensor([[0.0, 1.0]], dtype='float32')
self.assertRaises(
ValueError,
paddle.nn.functional.binary_cross_entropy,
input=input,
label=label,
reduction="unsupport reduction")
paddle.enable_static()


def bce_loss(input, label):
return -1 * (label * np.log(input) + (1. - label) * np.log(1. - input))
1 change: 1 addition & 0 deletions python/paddle/nn/functional/__init__.py
@@ -120,6 +120,7 @@
# from .lod import dynamic_gru #DEFINE_ALIAS
# from .lod import dynamic_lstm #DEFINE_ALIAS
# from .lod import dynamic_lstmp #DEFINE_ALIAS
from .loss import binary_cross_entropy #DEFINE_ALIAS
from .loss import bpr_loss #DEFINE_ALIAS
from .loss import center_loss #DEFINE_ALIAS
from .loss import cross_entropy #DEFINE_ALIAS
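
For context (not part of this PR's diff): a minimal dygraph sketch of the two APIs this change exposes and tests, paddle.nn.functional.binary_cross_entropy and paddle.nn.loss.BCELoss. The calls mirror those made in test_dygraph_functional and test_dygraph_layer above; the random inputs are placeholders chosen the same way the tests choose them.

    # Illustrative sketch only; mirrors the calls exercised in the tests above.
    import numpy as np
    import paddle

    paddle.disable_static()

    # Probabilities in (0, 1) and binary labels, as in the tests.
    input_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64)
    label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float64)

    input = paddle.to_tensor(input_np)
    label = paddle.to_tensor(label_np)

    # Functional form, newly re-exported from paddle.nn.functional.
    loss_fn = paddle.nn.functional.binary_cross_entropy(
        input, label, reduction='mean')

    # Equivalent layer form.
    bce_loss = paddle.nn.loss.BCELoss(reduction='mean')
    loss_layer = bce_loss(input, label)

    print(loss_fn.numpy(), loss_layer.numpy())
    paddle.enable_static()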