This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

add metrics tests for #9640 (#9759)

Merged 2 commits on Feb 11, 2018
1 change: 1 addition & 0 deletions CONTRIBUTORS.md
@@ -155,3 +155,4 @@ List of Contributors
* [Julian Salazar](https://github.com/JulianSlzr)
* [Meghna Baijal](https://github.com/mbaijal)
* [Tao Hu](https://github.com/dongzhuoyao)
* [Sorokin Evgeniy](https://github.com/TheTweak)
42 changes: 42 additions & 0 deletions tests/python/unittest/test_metric.py
@@ -46,6 +46,48 @@ def test_nll_loss():
    expected_loss = -(np.log(pred[0][2].asscalar()) + np.log(pred[1][1].asscalar())) / 2
    assert loss == expected_loss

def test_acc():
    pred = mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])
    label = mx.nd.array([0, 1, 1])
    metric = mx.metric.create('acc')
    metric.update([label], [pred])
    _, acc = metric.get()
    # accuracy = fraction of samples whose argmax prediction matches the label
    expected_acc = (np.argmax(pred, axis=1) == label).sum().asscalar() / label.size
    assert acc == expected_acc

def test_f1():
    pred = mx.nd.array([[0.3, 0.7], [1., 0], [0.4, 0.6], [0.6, 0.4], [0.9, 0.1]])
    label = mx.nd.array([0, 1, 1, 1, 1])
    # predicted positives = samples whose argmax prediction is class 1
    positives = np.argmax(pred, axis=1).sum().asscalar()
    # this counts all correct predictions; it equals the true-positive count
    # for this data because no sample is a true negative
    true_positives = (np.argmax(pred, axis=1) == label).sum().asscalar()
    precision = true_positives / positives
    overall_positives = label.sum().asscalar()
    recall = true_positives / overall_positives
    f1_expected = 2 * (precision * recall) / (precision + recall)
    metric = mx.metric.create('f1')
    metric.update([label], [pred])
    _, f1 = metric.get()
    assert f1 == f1_expected

def test_perplexity():
    pred = mx.nd.array([[0.8, 0.2], [0.2, 0.8], [0, 1.]])
    label = mx.nd.array([0, 1, 1])
    # probability that each sample assigns to its true label
    p = pred.asnumpy()[np.arange(label.size), label.asnumpy().astype('int32')]
    # perplexity = exp of the mean negative log-likelihood
    perplexity_expected = np.exp(-np.log(p).sum()/label.size)
    metric = mx.metric.create('perplexity', -1)  # ignore_label=-1 matches no label here
    metric.update([label], [pred])
    _, perplexity = metric.get()
    assert perplexity == perplexity_expected

def test_pearsonr():
    pred = mx.nd.array([[0.7, 0.3], [0.1, 0.9], [1., 0]])
    label = mx.nd.array([[0, 1], [1, 0], [1, 0]])
    # Pearson correlation between the flattened predictions and labels
    pearsonr_expected = np.corrcoef(pred.asnumpy().ravel(), label.asnumpy().ravel())[0, 1]
    metric = mx.metric.create('pearsonr')
    metric.update([label], [pred])
    _, pearsonr = metric.get()
    assert pearsonr == pearsonr_expected

if __name__ == '__main__':
    import nose
    nose.runmodule()
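
As a side note, every test above drives its metric through the same two-call protocol: update([labels], [preds]) followed by get(). A minimal sketch of that pattern outside the test file, using MXNet's stock CompositeEvalMetric to evaluate several metrics in one pass (the input values here are invented for illustration):

import mxnet as mx

# Evaluate two metrics over the same batch with a single update() call.
pred = mx.nd.array([[0.7, 0.3], [0.1, 0.9]])  # per-class scores
label = mx.nd.array([0, 1])                   # ground-truth classes

composite = mx.metric.CompositeEvalMetric()
for name in ('acc', 'f1'):
    composite.add(mx.metric.create(name))

composite.update([label], [pred])
print(composite.get())  # -> (['accuracy', 'f1'], [1.0, 1.0])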