[Target] Automatically detect system triple when not specified by the user #16513

Merged 6 commits on Mar 14, 2024
Changes from 1 commit
add issue links to skipped tests
Change-Id: I4be5c8f64850a3612516c0b49ec8fcf4191a4fbb
lhutton1 committed Feb 12, 2024
commit c8363bdfcde80fe148a38a18b9fcebf0f4effd96
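The pattern this commit applies throughout: each pytest skip whose reason contained an "<issue link>" or "<insert issue here>" placeholder now points at a concrete tracking issue. A minimal sketch of the resulting shape (the test name and issue number below are illustrative only, not from this commit):

import platform

import pytest


@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Short description of the failure. See https://github.com/apache/tvm/issues/NNNNN",
)
def test_example():
    ...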
2 changes: 2 additions & 0 deletions python/tvm/relay/op/strategy/arm_cpu.py
@@ -150,7 +150,9 @@ def conv2d_strategy_arm_cpu(attrs, inputs, out_type, target):
         pt, pl, pb, pr = topi.nn.get_pad_tuple(padding, (kh, kw))
         is_winograd_applicable = (
             "float" in data.dtype
+            and "custom" not in data.dtype
             and "float" in kernel.dtype
+            and "custom" not in kernel.dtype
             and kh == 3
             and kw == 3
             and stride_h == 1
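The two added guards keep custom datatypes out of the Winograd path. A hedged sketch of why the substring test alone is insufficient, assuming a custom type registered under the hypothetical name "myfloat" (TVM spells registered custom types as "custom[name]bits"):

data_dtype = "custom[myfloat]32"  # hypothetical custom datatype string

old_check = "float" in data_dtype                                 # True  (wrongly enables Winograd)
new_check = "float" in data_dtype and "custom" not in data_dtype  # False (correctly rejected)
assert old_check and not new_check

This is presumably also what lets the commit drop the aarch64 skip on test_myfloat in tests/python/relay/test_custom_datatypes.py further down.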
2 changes: 1 addition & 1 deletion python/tvm/topi/arm_cpu/injective.py
@@ -69,7 +69,7 @@ def schedule_injective(outs):
         if list(s[x].op.axis):
             # do not vectorize for broadcast
             dtype = "uint16" if x.dtype == "bfloat16" else x.dtype
-            (io, ii) = s[x].split(list(s[x].op.axis)[-1], 16 // np.dtype(dtype).itemsize)
+            (io, ii) = s[x].split(list(s[x].op.axis)[-1], 16 // (tvm.DataType(dtype).bits // 8))
             s[x].vectorize(ii)
     tvm.te.schedule.AutoInlineInjective(s)

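The split factor is now derived from TVM's own dtype parser rather than numpy's, so dtype strings numpy cannot parse (notably "custom[...]" types) no longer raise during scheduling. A minimal sketch of the equivalence for an ordinary dtype, assuming a standard TVM install:

import numpy as np
import tvm

dtype = "float32"
# Old expression: numpy must understand the dtype string.
lanes_old = 16 // np.dtype(dtype).itemsize          # 16 // 4 == 4
# New expression: TVM parses the dtype itself, including custom types.
lanes_new = 16 // (tvm.DataType(dtype).bits // 8)   # 16 // 4 == 4
assert lanes_old == lanes_new == 4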
13 changes: 8 additions & 5 deletions tests/python/frontend/tflite/test_forward.py
@@ -1102,7 +1102,7 @@ def test_forward_quantized_convolution():
 
     if platform.machine() == "aarch64":
         pytest.skip(
-            reason="Grouped convolution type inference error for `arm_cpu`. See <issue link>"
+            reason="Grouped convolution type inference error for `arm_cpu`. See https://github.com/apache/tvm/issues/16532"
         )
 
     _test_tflite2_quantized_convolution(
@@ -1129,7 +1129,9 @@ def test_forward_quantized_depthwise_convolution():
     )
 
     if platform.machine() == "aarch64":
-        pytest.skip(reason="Tensor intrinsic data type mismatch error. See <issue link>")
+        pytest.skip(
+            reason="Tensor intrinsic data type mismatch error. See https://github.com/apache/tvm/issues/16533"
+        )
 
     _test_tflite2_quantized_depthwise_convolution(
         [1, 8, 8, 128], [1, 1, 128, 1], [1, 1], [1, 1], "SAME", "NHWC", 1, tf.int16
@@ -5104,7 +5106,8 @@ def test_forward_qnn_mobilenet_v3_net():
 
 
 @pytest.mark.skipif(
-    platform.machine() == "aarch64", reason="Fails with an output mismatch. See <insert issue here>"
+    platform.machine() == "aarch64",
+    reason="Fails with an output mismatch. See https://github.com/apache/tvm/issues/16534",
 )
 def test_forward_tflite2_qnn_resnet50():
     """Test the Quantized TFLite version 2.1.0 Resnet50 model."""
@@ -5204,7 +5207,7 @@ def test_forward_tflite_float16():
 
 @pytest.mark.skipif(
     platform.machine() == "aarch64",
-    reason="Fails during legalization due to int16 datatype. See <insert issue here>",
+    reason="Fails during legalization due to int16 datatype. See https://github.com/apache/tvm/issues/16535",
 )
 def test_forward_mobilenet_int16():
     """Test int16 quantized model"""
@@ -5250,7 +5253,7 @@ def representative_dataset():
 
 @pytest.mark.skipif(
     platform.machine() == "aarch64",
-    reason="Fails during legalization due to int16 datatype. See <insert issue here>",
+    reason="Fails during legalization due to int16 datatype. See https://github.com/apache/tvm/issues/16535",
 )
 def test_forward_ds_cnn_int16():
     """Test DS_CNN int16 quantized model"""
4 changes: 3 additions & 1 deletion tests/python/relay/test_any.py
@@ -640,7 +640,9 @@ def test_any_conv2d():
     )
 
     if platform.machine() == "aarch64":
-        pytest.skip(reason="Dynamic height and width not supported in arm_cpu. See <issue link>")
+        pytest.skip(
+            reason="Dynamic height and width not supported in arm_cpu. See https://github.com/apache/tvm/issues/16536"
+        )
 
     verify_any_conv2d(
         (relay.Any(), 64, relay.Any(), relay.Any()),
4 changes: 0 additions & 4 deletions tests/python/relay/test_custom_datatypes.py
@@ -533,10 +533,6 @@ def run_batchnorm(src_dtype, dst_dtype, rtol=1e-6, atol=1e-6):
     )
 
 
-@pytest.mark.skipif(
-    platform.machine() == "aarch64",
-    reason="Custom datatype not understood by `arm_cpu` schedule. See <issue link>.",
-)
 def test_myfloat():
     setup_myfloat()
 
2 changes: 1 addition & 1 deletion tests/python/relay/test_op_qnn_conv2d.py
@@ -768,7 +768,7 @@ def test_kernel_size_1x1_strides_2():
 
 @pytest.mark.skipif(
     platform.machine() == "aarch64",
-    reason="Fails due to encountering none type in autotvm. See <issue link>",
+    reason="Fails due to encountering none type in autotvm. See https://github.com/apache/tvm/issues/16538",
 )
 def test_tflite_large_irregular():
     with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
12 changes: 6 additions & 6 deletions tests/python/relay/test_pass_alter_op_layout.py
@@ -1541,7 +1541,7 @@ def test_conv2d_reduce_channels():
 
 @pytest.mark.skipif(
     platform.machine() == "aarch64",
-    reason="Layout NCHW4c unsupported in `arm_cpu`. See <issue link>",
+    reason="Layout NCHW4c unsupported in `arm_cpu`. See https://github.com/apache/tvm/issues/16537",
 )
 def test_alter_layout_nonscalar_broadcast():
     """Test broadcast operators"""
@@ -1609,7 +1609,7 @@ def alter_conv2d(attrs, inputs, tinfos, out_type):
 
 @pytest.mark.skipif(
     platform.machine() == "aarch64",
-    reason="Layout NCHW4c unsupported in `arm_cpu`. See <issue link>",
+    reason="Layout NCHW4c unsupported in `arm_cpu`. See https://github.com/apache/tvm/issues/16537",
 )
 def test_alter_layout_blocked_no_broadcast():
     """Test broadcast operators working on already blocked layout"""
@@ -1671,7 +1671,7 @@ def alter_conv2d(attrs, inputs, tinfos, out_type):
 
 @pytest.mark.skipif(
     platform.machine() == "aarch64",
-    reason="Layout NCHW4c unsupported in `arm_cpu`. See <issue link>",
+    reason="Layout NCHW4c unsupported in `arm_cpu`. See https://github.com/apache/tvm/issues/16537",
 )
 def test_alter_layout_blocked_broadcast():
     """Test broadcast operators working on already blocked layout"""
@@ -1733,7 +1733,7 @@ def alter_conv2d(attrs, inputs, tinfos, out_type):
 
 @pytest.mark.skipif(
     platform.machine() == "aarch64",
-    reason="Layout NCHW4c unsupported in `arm_cpu`. See <issue link>",
+    reason="Layout NCHW4c unsupported in `arm_cpu`. See https://github.com/apache/tvm/issues/16537",
 )
 def test_alter_layout_re_blocking_broadcast():
     """Test of re-blocking shapes with broadcast operators"""
@@ -1821,7 +1821,7 @@ def alter_conv2d(attrs, inputs, tinfos, out_type):
 
 @pytest.mark.skipif(
     platform.machine() == "aarch64",
-    reason="Layout NCHW4c unsupported in `arm_cpu`. See <issue link>",
+    reason="Layout NCHW4c unsupported in `arm_cpu`. See https://github.com/apache/tvm/issues/16537",
 )
 def test_broadcast_non_adaptable():
     """NCHW4c + [x, x, 4] and NCHW4c is being altered to NCHW"""
@@ -1893,7 +1893,7 @@ def alter_conv2d(attrs, inputs, tinfos, out_type):
 
 @pytest.mark.skipif(
     platform.machine() == "aarch64",
-    reason="Layout NCHW4c unsupported in `arm_cpu`. See <issue link>",
+    reason="Layout NCHW4c unsupported in `arm_cpu`. See https://github.com/apache/tvm/issues/16537",
 )
 def test_broadcast_respect_input_layouts():
     def before():
8 changes: 2 additions & 6 deletions tests/python/runtime/test_runtime_module_based_interface.py
@@ -134,9 +134,6 @@ def test_cpu_get_graph_params_run():
 
 
 @tvm.testing.requires_llvm
-@pytest.mark.skipif(
-    platform.machine() == "aarch64", reason="Fails with an output mismatch. See <issue link>."
-)
 def test_cpu_get_graph_params_compare():
     # Create sample net
     from tvm.relay.testing.init import create_workload, Constant
@@ -168,9 +165,8 @@ def test_cpu_get_graph_params_compare():
     loaded_lib = tvm.runtime.load_module(path_lib)
     loaded_params = loaded_lib["get_graph_params"]()
 
-    tvm.testing.assert_allclose(
-        params["conv_weight"].numpy(), loaded_params["p0"].numpy()[0][0], atol=1e-5
-    )
+    p0_squeezed = np.squeeze(loaded_params["p0"].numpy())
+    tvm.testing.assert_allclose(params["conv_weight"].numpy(), p0_squeezed, atol=1e-5)


@tvm.testing.requires_cuda
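The rewritten assertion squeezes away every size-1 axis instead of hard-indexing [0][0], so the comparison no longer depends on exactly how many leading unit dimensions the stored parameter carries. A minimal sketch with made-up shapes:

import numpy as np

weight = np.arange(24, dtype="float32").reshape(2, 3, 4)  # original parameter
stored = weight.reshape(1, 1, 2, 3, 4)                    # stored form with unit axes

assert stored[0][0].shape == weight.shape  # [0][0] assumes exactly two unit axes
np.testing.assert_allclose(weight, np.squeeze(stored), atol=1e-5)  # axis-count agnostic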