examples/contrib/cifar10/utils.py (3 additions & 2 deletions)

@@ -18,8 +18,9 @@
 
 
 def get_train_test_datasets(path):
-    if not Path(path).exists():
-        Path.mkdir(path, parents=True)
+    path = Path(path)
+    if not path.exists():
+        path.mkdir(parents=True)
         download = True
     else:
         download = True if len(os.listdir(path)) < 1 else False
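Review note: the deleted lines called `Path.mkdir` as an unbound method on the raw `path` argument, which relies on `path` already being a `Path` instance; with a plain string it typically fails (and `Path(path)` was constructed twice). Normalizing the argument once makes every later call safe for both `str` and `Path`. A minimal sketch of the pattern (`ensure_dir` and `"./data"` are hypothetical, not part of the PR):

    from pathlib import Path

    def ensure_dir(path):
        # Normalize once, then use instance methods on the Path object.
        path = Path(path)  # accepts str or Path; cheap if already a Path
        if not path.exists():
            path.mkdir(parents=True)
        return path

    ensure_dir("./data")  # a plain string argument now works end to end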
ignite/contrib/metrics/precision_recall_curve.py (2 additions & 2 deletions)

@@ -13,8 +13,8 @@ def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Te
     except ImportError:
         raise RuntimeError("This contrib module requires sklearn to be installed.")
 
-    y_true = y_targets.numpy()
-    y_pred = y_preds.numpy()
+    y_true = y_targets.cpu().numpy()
+    y_pred = y_preds.cpu().numpy()
     return precision_recall_curve(y_true, y_pred)
 
 
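Core of the fix: `torch.Tensor.numpy()` only works for tensors in CPU memory and raises a `TypeError` for CUDA tensors, while `Tensor.cpu()` returns the original object when the tensor already lives on the CPU, so the extra call costs nothing in the CPU case. A minimal sketch (the CUDA branch assumes a GPU is available):

    import torch

    t = torch.tensor([0.2, 0.8])
    assert t.cpu() is t  # no copy: .cpu() is a no-op for CPU tensors

    if torch.cuda.is_available():
        g = t.to("cuda")
        # g.numpy() would raise TypeError ("can't convert cuda ... tensor to numpy")
        arr = g.cpu().numpy()  # copy back to host memory, then convert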
tests/ignite/contrib/metrics/test_average_precision.py (2 additions & 0 deletions)

@@ -205,6 +205,8 @@ def get_test_cases():
     for _ in range(3):
         test_cases = get_test_cases()
         for y_pred, y, batch_size in test_cases:
+            y_pred = y_pred.to(device)
+            y = y.to(device)
             _test(y_pred, y, batch_size, "cpu")
             if device.type != "xla":
                 _test(y_pred, y, batch_size, idist.device())
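The added lines move the synthetic inputs, which `get_test_cases()` creates on the CPU, onto the current distributed device before running the metric; that is what exercises the new `.cpu()` path in the compute function. Illustrative sketch (the device selection and shapes are assumptions, not the test's actual fixtures):

    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    y_pred = torch.rand(100).to(device)         # .to() copies only when needed
    y = torch.randint(0, 2, (100,)).to(device)  # binary targets on the same device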
tests/ignite/contrib/metrics/test_precision_recall_curve.py (12 additions & 10 deletions)

@@ -111,9 +111,9 @@ def update_fn(engine, batch):
 
     data = list(range(size // batch_size))
     precision, recall, thresholds = engine.run(data, max_epochs=1).metrics["precision_recall_curve"]
-    precision = precision.numpy()
-    recall = recall.numpy()
-    thresholds = thresholds.numpy()
+    precision = precision.cpu().numpy()
+    recall = recall.cpu().numpy()
+    thresholds = thresholds.cpu().numpy()
 
     assert pytest.approx(precision) == sk_precision
     assert pytest.approx(recall) == sk_recall

@@ -168,9 +168,9 @@ def _test(y_pred, y, batch_size, metric_device):
         res = prc.compute()
 
         assert isinstance(res, Tuple)
-        assert precision_recall_curve(np_y, np_y_pred)[0] == pytest.approx(res[0])
-        assert precision_recall_curve(np_y, np_y_pred)[1] == pytest.approx(res[1])
-        assert precision_recall_curve(np_y, np_y_pred)[2] == pytest.approx(res[2])
+        assert precision_recall_curve(np_y, np_y_pred)[0] == pytest.approx(res[0].cpu().numpy())
+        assert precision_recall_curve(np_y, np_y_pred)[1] == pytest.approx(res[1].cpu().numpy())
+        assert precision_recall_curve(np_y, np_y_pred)[2] == pytest.approx(res[2].cpu().numpy())
 
     def get_test_cases():
         test_cases = [

@@ -183,9 +183,11 @@ def get_test_cases():
         ]
         return test_cases
 
-    for _ in range(5):
+    for _ in range(3):
         test_cases = get_test_cases()
         for y_pred, y, batch_size in test_cases:
+            y_pred = y_pred.to(device)
+            y = y.to(device)
             _test(y_pred, y, batch_size, "cpu")
             if device.type != "xla":
                 _test(y_pred, y, batch_size, idist.device())

@@ -229,9 +231,9 @@ def update(engine, i):
         assert precision.shape == sk_precision.shape
         assert recall.shape == sk_recall.shape
         assert thresholds.shape == sk_thresholds.shape
-        assert pytest.approx(precision) == sk_precision
-        assert pytest.approx(recall) == sk_recall
-        assert pytest.approx(thresholds) == sk_thresholds
+        assert pytest.approx(precision.cpu().numpy()) == sk_precision
+        assert pytest.approx(recall.cpu().numpy()) == sk_recall
+        assert pytest.approx(thresholds.cpu().numpy()) == sk_thresholds
 
     metric_devices = ["cpu"]
     if device.type != "xla":
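On the reworked assertions: converting each tensor with `.cpu().numpy()` before `pytest.approx` makes the comparison against the scikit-learn reference device-agnostic, since `pytest.approx` has first-class support for NumPy arrays but a CUDA tensor cannot be turned into one implicitly. A self-contained sketch of the pattern, with made-up values:

    import numpy as np
    import pytest
    import torch

    sk_precision = np.array([0.5, 1.0, 1.0])   # hypothetical scikit-learn reference
    precision = torch.tensor([0.5, 1.0, 1.0])  # hypothetical metric output

    # Move to host memory and convert before the approximate comparison.
    assert pytest.approx(precision.cpu().numpy()) == sk_precision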