Update scikit-learn requirement from <1.5.0 to <1.6.0 (intel#31)
* Update scikit-learn requirement from <1.5.0 to <1.6.0

Updates the requirements on [scikit-learn](https://github.com/scikit-learn/scikit-learn) to permit the latest version.
- [Release notes](https://github.com/scikit-learn/scikit-learn/releases)
- [Commits](scikit-learn/scikit-learn@0.1-beta...1.5.0)

---
updated-dependencies:
- dependency-name: scikit-learn
  dependency-type: direct:development
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update tests

* Fix

* Fix last test

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Alessandro Palla <alessandro.palla@intel.com>
dependabot[bot] and alessandropalla authored May 27, 2024
1 parent 81853da commit 5294a5c
Showing 7 changed files with 13 additions and 13 deletions.
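
The recurring change across the test files below is the same one-line fix: torch tensors are converted to NumPy arrays before being handed to scikit-learn's r2_score, presumably to keep the metric calls working under the newer scikit-learn pin. A minimal standalone sketch of the pattern (illustrative values, not from the repo):

    import torch
    from sklearn.metrics import r2_score

    torch.manual_seed(0)
    y_ref = torch.randn(128)             # reference output
    y = y_ref + 0.01 * torch.randn(128)  # output under test, small error

    # Convert tensors to NumPy explicitly rather than relying on implicit coercion
    assert 1 - r2_score(y_ref.numpy(), y.numpy()) < 0.01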
2 changes: 1 addition & 1 deletion dev_requirements.txt
@@ -1,7 +1,7 @@
 pytest
 pytest-xdist
 pytest-cov
-scikit-learn < 1.5.0
+scikit-learn <= 1.5.0
 pre-commit; sys_platform == 'darwin'
 sphinx
 breathe
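
One detail worth noting: the merged pin is scikit-learn <= 1.5.0, which is tighter than the < 1.6.0 range in the commit title, since it excludes later 1.5.x patch releases. A quick way to check specifier semantics with the packaging library (an aside for illustration; not part of this repo's requirements):

    from packaging.specifiers import SpecifierSet

    assert "1.5.0" in SpecifierSet("<=1.5.0")
    assert "1.5.1" not in SpecifierSet("<=1.5.0")  # patch releases excluded
    assert "1.5.1" in SpecifierSet("<1.6.0")       # the range named in the title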
4 changes: 2 additions & 2 deletions test/python/test_compile.py
@@ -62,7 +62,7 @@ def test_compilation(dtype):
     y2 = compiled_model(x).detach()
     t2 = time.perf_counter()

-    assert 1 - r2_score(y_ref, y1) < 0.01
+    assert 1 - r2_score(y_ref.numpy(), y1.numpy()) < 0.01

     assert torch.allclose(y1, y2)

@@ -88,7 +88,7 @@ def test_torch_compile():
     else:
         compiled_model = torch.compile(model, backend="npu")
     y = compiled_model(x).detach()
-    assert 1 - r2_score(y_ref, y) < 0.01
+    assert 1 - r2_score(y_ref.numpy(), y.numpy()) < 0.01


 @pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.int8])
4 changes: 2 additions & 2 deletions test/python/test_conv.py
@@ -42,6 +42,6 @@ def test_conv(in_channels, out_channels, kernels, dim, bias, dtype):
     assert y.dtype == y_ref.dtype
     assert y.shape == y_ref.shape
     if dtype == torch.int8:
-        assert 1 - r2_score(y_ref.flatten(), y.flatten()) < 0.05
+        assert 1 - r2_score(y_ref.flatten().numpy(), y.flatten().numpy()) < 0.05
     else:
-        assert 1 - r2_score(y_ref.flatten(), y.flatten()) < 0.001
+        assert 1 - r2_score(y_ref.flatten().numpy(), y.flatten().numpy()) < 0.001
5 changes: 2 additions & 3 deletions test/python/test_llm.py
@@ -51,7 +51,7 @@ def test_compilation(tokenizer, decoder_model, dtype):

     y = compiled_model(prefill).logits.detach()

-    assert 1 - r2_score(y_ref.flatten(), y.flatten()) < 0.01
+    assert 1 - r2_score(y_ref.flatten().numpy(), y.flatten().numpy()) < 0.01


 @torch.no_grad
@@ -75,5 +75,4 @@ def test_phi2_mlp(seq_len, hidden_size, intermediate_size):

     out = model(x)

-    assert 1 - r2_score(reference, out) < 0.001
-    print(r2_score(reference, out))
+    assert 1 - r2_score(reference.numpy(), out.numpy()) < 0.001
7 changes: 4 additions & 3 deletions test/python/test_matmul.py
@@ -34,7 +34,7 @@ def test_matmul(batch, inC, outC):

     assert np.isfinite(npu_mm).all()

-    assert 1 - r2_score(cpu_mm, npu_mm) < 0.001
+    assert 1 - r2_score(cpu_mm.numpy(), npu_mm) < 0.001


 @pytest.mark.parametrize(
@@ -58,7 +58,8 @@ def test_qmatmul_per_channel_scales(batch, inC, outC):
     assert weights_quant.shape == W.shape

     # Conversion done properly
-    assert 1 - r2_score(weights_quant.to(torch.float16) * scale, W) < 0.001
+    expected_W = weights_quant.to(torch.float16) * scale
+    assert 1 - r2_score(expected_W.numpy(), W.numpy()) < 0.001

     mm = QMatMul(inC, outC, batch)

@@ -68,4 +69,4 @@ def test_qmatmul_per_channel_scales(batch, inC, outC):

     assert np.isfinite(npu_mm).all()

-    assert 1 - r2_score(cpu_mm, npu_mm) < 0.001
+    assert 1 - r2_score(cpu_mm.numpy(), npu_mm) < 0.001
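
The reworked assertion in test_qmatmul_per_channel_scales checks that the int8 weights dequantize back to the original W via a per-output-channel scale. A self-contained sketch of that round trip, with hypothetical shapes and a hand-rolled quantizer standing in for the library's own:

    import torch
    from sklearn.metrics import r2_score

    torch.manual_seed(0)
    W = torch.randn(64, 128).to(torch.float16)             # [outC, inC] weights
    scale = W.abs().amax(dim=-1, keepdim=True) / 127       # one scale per output channel
    weights_quant = torch.round(W / scale).to(torch.int8)  # per-channel int8 quantization

    # Dequantize and compare against the original, as the test above does
    expected_W = weights_quant.to(torch.float16) * scale
    assert 1 - r2_score(expected_W.numpy().flatten(), W.numpy().flatten()) < 0.001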
2 changes: 1 addition & 1 deletion test/python/test_optimizations.py
@@ -146,7 +146,7 @@ def test_model(model_name, hidden_size, intermediate_size, sequence_length, bias

     output = optimized(example_input)[0]

-    assert 1 - r2_score(reference.flatten(), output.flatten()) < 0.01
+    assert 1 - r2_score(reference.flatten().numpy(), output.flatten().numpy()) < 0.01


 @pytest.mark.parametrize("layers", [2, 3, 10])
2 changes: 1 addition & 1 deletion test/python/test_quantization.py
@@ -93,7 +93,7 @@ def test_compiled_quantized(batch, inC, outC):

     y1 = compiled_model(X).detach()

-    assert 1 - r2_score(y_ref, y1) < 0.01
+    assert 1 - r2_score(y_ref.numpy(), y1.numpy()) < 0.01


 @pytest.mark.parametrize("batch", [16, 128])
