# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
| 17 | + |
import sys
import pytest
import numpy as np

import tvm.testing
from tvm import te
from tvm import relay
from tvm.relay.backend import Executor, Runtime
from tvm.contrib.hexagon.session import Session
from tvm.testing.usmp import is_tvm_backendallocworkspace_calls

from .conftest import requires_hexagon_toolchain

# Parametrize every test in this file over USMP (Unified Static Memory
# Planning) disabled and enabled, so each test runs once per setting.
usmp_enabled = tvm.testing.parameter(False, True)
| 32 | + |
| 33 | + |
@requires_hexagon_toolchain
def test_conv2d(hexagon_session: Session, aot_host_target, aot_target, usmp_enabled):
    """Compile a two-layer NHWC conv2d model for Hexagon with the AOT executor.

    Builds the model with USMP toggled by the ``usmp_enabled`` parameter,
    runs it through ``hexagon_session`` on the device, and compares the output
    against a host-side LLVM graph-executor reference.

    Parameters
    ----------
    hexagon_session : Session
        Active Hexagon RPC session (fixture).
    aot_host_target, aot_target
        Host and device targets for the AOT build (fixtures).
    usmp_enabled : bool
        Whether ``tir.usmp.enable`` is set for the Hexagon build.
    """
    dtype = "float32"
    input_shape = (1, 8, 8, 3)
    w1_shape = (5, 5, 3, 1)
    w2_shape = (5, 5, 1, 3)
    data = relay.var("data", relay.TensorType(input_shape, dtype))
    weight1 = relay.var("weight1", relay.TensorType(w1_shape, dtype))
    weight2 = relay.var("weight2", relay.TensorType(w2_shape, dtype))
    y1 = relay.nn.conv2d(
        data,
        weight1,
        padding=(2, 2),
        kernel_size=(5, 5),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    y2 = relay.nn.conv2d(
        y1,
        weight2,
        padding=(2, 2),
        kernel_size=(5, 5),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    f = relay.Function([data, weight1, weight2], y2)
    relay_mod = tvm.IRModule.from_expr(f)
    relay_mod = relay.transform.InferType()(relay_mod)

    # Unpack the shape tuples directly instead of indexing each dimension.
    weight1_data = np.random.rand(*w1_shape).astype(dtype=dtype)
    weight2_data = np.random.rand(*w2_shape).astype(dtype=dtype)
    input_data = np.random.rand(*input_shape).astype(dtype=dtype)

    params = {"weight1": weight1_data, "weight2": weight2_data}
    inputs = {"data": input_data}

    with tvm.transform.PassContext(opt_level=3, config={"tir.usmp.enable": usmp_enabled}):
        lowered = tvm.relay.build(
            relay_mod,
            params=params,
            target=tvm.target.Target(aot_target, host=aot_host_target),
            runtime=Runtime("cpp"),
            executor=Executor("aot", {"unpacked-api": False, "interface-api": "packed"}),
        )

    # With USMP enabled, workspace memory is planned statically, so the
    # generated code must not call TVMBackendAllocWorkspace; with USMP
    # disabled, it must.
    assert is_tvm_backendallocworkspace_calls(lowered.lib) != usmp_enabled

    aot_mod = hexagon_session.get_executor_from_factory(lowered)
    aot_mod.set_input(**inputs)
    aot_mod.run()
    hexagon_output = aot_mod.get_output(0).numpy()

    # Host-side reference: same module built for plain LLVM with the graph
    # executor (no USMP involvement).
    target_llvm = tvm.target.Target("llvm")
    with tvm.transform.PassContext(opt_level=3):
        llvm_lowered = tvm.relay.build(
            relay_mod,
            tvm.target.Target(target_llvm, host=target_llvm),
            runtime=Runtime("cpp"),
            executor=Executor("graph"),
        )

    # NOTE(review): relies on tvm.contrib.graph_executor being reachable as an
    # attribute even though only tvm.contrib.hexagon.session is imported here
    # — presumably imported transitively; verify.
    llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
    llvm_graph_mod.set_input(**params)
    llvm_graph_mod.run(**inputs)
    expected_output = llvm_graph_mod.get_output(0).numpy()

    tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
| 109 | + |
| 110 | + |
| 111 | +if __name__ == "__main__": |
| 112 | + sys.exit(pytest.main(sys.argv)) |