|
| 1 | +import numpy as np |
| 2 | +import mxnet as mx |
| 3 | +from mxnet import gluon |
| 4 | +import nnvm |
| 5 | +import tvm |
| 6 | +from tvm.contrib import graph_runtime |
| 7 | + |
| 8 | + |
def test_avg_pool2d():
    """Check that an AvgPool2D + Dense Gluon model compiled through NNVM
    with the TensorRT external accelerator matches the MXNet baseline.

    Runs the same seeded random input through (a) the model directly in
    MXNet on CPU and (b) the NNVM/TVM-compiled version on GPU, then
    compares the two outputs to ~3 decimal places.
    """
    # Deterministic input so both runs see identical data.
    np.random.seed(0)
    in_shape = [1, 1, 28, 28]
    out_shape = [1, 10]
    sample = np.random.random(in_shape).astype('float32')

    # Reference run: evaluate the hybridized Gluon model on CPU.
    model = gluon.nn.HybridSequential()
    with model.name_scope():
        model.add(gluon.nn.AvgPool2D(pool_size=3, strides=1, padding=1))
        model.add(gluon.nn.Dense(10))
    model.collect_params().initialize(mx.init.Xavier(), ctx=mx.cpu())
    model.hybridize()
    expected = model(mx.nd.array(sample, ctx=mx.cpu())).asnumpy()

    # Candidate run: import the same network into NNVM, build for CUDA
    # with the TensorRT accelerator enabled, and execute on the GPU.
    sym, params = nnvm.frontend.from_mxnet(model)
    target = tvm.target.cuda()
    with nnvm.compiler.build_config(opt_level=3, ext_accel='tensorrt'):
        graph, lib, params = nnvm.compiler.build(
            sym, target, shape={'data': in_shape}, params=params)
    module = graph_runtime.create(graph, lib, tvm.gpu())
    module.set_input('data', tvm.nd.array(sample, ctx=tvm.gpu()))
    module.set_input(**params)
    module.run()
    actual = module.get_output(0, tvm.nd.empty(out_shape)).asnumpy()

    # Baseline and compiled outputs must agree closely.
    np.testing.assert_almost_equal(expected, actual, decimal=3)
| 43 | + |
| 44 | + |
# Allow running this test file directly as a standalone script.
if __name__ == '__main__':
    test_avg_pool2d()
0 commit comments