TFLite frontend fails with NCHW #1

@Li-wuhan

Description

relay.frontend.from_tflite() fails; it seems NCHW is not supported. If I modify run_tvm.py to use NHWC instead, relay.frontend.from_tflite() passes. Does that mean NCHW is not supported when converting a TFLite model?
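For reference, here is a minimal sketch of the import call that passes, assuming the variable names from run_tvm.py and a MobileNet-style 224x224 model; the file path and input tensor name are placeholders:

```python
# Minimal sketch (placeholder path and tensor name; assumes the `tflite`
# flatbuffer schema package commonly used alongside TVM at this time).
import tflite.Model
from tvm import relay

with open("mobilenet_v1_1.0_224.tflite", "rb") as f:  # placeholder path
    buf = f.read()
tflite_model = tflite.Model.Model.GetRootAsModel(buf, 0)

input_tensor = "input"          # placeholder tensor name
input_dtype = "float32"

# TFLite stores activations in NHWC, so the shape passed to the frontend
# must be NHWC: (N, H, W, C).
input_shape = (1, 224, 224, 3)   # passes
# input_shape = (1, 3, 224, 224) # NCHW -- triggers the error below

func, params = relay.frontend.from_tflite(
    tflite_model,
    shape_dict={input_tensor: input_shape},
    dtype_dict={input_tensor: input_dtype},
)
```

The failing NCHW run: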

root@76802c36f63b:~/tvm_test/tvm-tutorial# python3 run_tvm.py
Traceback (most recent call last):

File "run_tvm.py", line 73, in
tvm_output = run(model_file, image_data)

File "run_tvm.py", line 32, in run
func, params = relay.frontend.from_tflite(tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype})

File "/root/tvm/python/tvm/relay/frontend/tflite.py", line 1386, in from_tflite
mod = _module.Module.from_expr(func)

File "/root/tvm/python/tvm/relay/module.py", line 233, in from_expr
return _module.Module_FromExpr(expr, funcs, defs)

File "/root/tvm/python/tvm/_ffi/_ctypes/function.py", line 211, in call
raise get_last_ffi_error()

tvm._ffi.base.TVMError: Traceback (most recent call last):
[bt] (7) /root/tvm/build/libtvm.so(TVMFuncCall+0x65) [0x7f10176b17d5]
[bt] (6) /root/tvm/build/libtvm.so(+0x11b5c0e) [0x7f10175c9c0e]
[bt] (5) /root/tvm/build/libtvm.so(tvm::relay::ModuleNode::FromExpr(tvm::relay::Expr const&, tvm::Map<tvm::relay::GlobalVar, tvm::relay::Function, void, void> const&, tvm::Map<tvm::relay::GlobalTypeVar, tvm::relay::TypeData, void, void> const&)+0x167) [0x7f10175c7017]
[bt] (4) /root/tvm/build/libtvm.so(tvm::relay::ModuleNode::Add(tvm::relay::GlobalVar const&, tvm::relay::Function const&, bool)+0x58b) [0x7f10175c665b]
[bt] (3) /root/tvm/build/libtvm.so(tvm::relay::InferType(tvm::relay::Function const&, tvm::relay::Module const&, tvm::relay::GlobalVar const&)+0x259) [0x7f10174d32f9]
[bt] (2) /root/tvm/build/libtvm.so(tvm::relay::TypeInferencer::Infer(tvm::relay::Expr)+0x71) [0x7f10174d2b01]
[bt] (1) /root/tvm/build/libtvm.so(tvm::relay::ErrorReporter::RenderErrors(tvm::relay::Module const&, bool)+0x181e) [0x7f101759d4ae]
[bt] (0) /root/tvm/build/libtvm.so(dmlc::LogMessageFatal::~LogMessageFatal()+0x43) [0x7f1016efef23]
File "/root/tvm/src/relay/ir/error.cc", line 133
TVMError:
Error(s) have occurred. The program has been annotated with them:

In main:
v0.0.4
fn (%input: Tensor[(1, 3, 224, 224), float32], %v_param_1: Tensor[(3, 3, 3, 32), float32], %v_param_2: Tensor[(32), float32], %v_param_3: Tensor[(3, 3, 32, 1), float32], %v_param_4: Tensor[(32), float32], %v_param_5: Tensor[(1, 1, 32, 64), float32], %v_param_6: Tensor[(64), float32], %v_param_7: Tensor[(3, 3, 64, 1), float32], %v_param_8: Tensor[(64), float32], %v_param_9: Tensor[(1, 1, 64, 128), float32], %v_param_10: Tensor[(128), float32], %v_param_11: Tensor[(3, 3, 128, 1), float32], %v_param_12: Tensor[(128), float32], %v_param_13: Tensor[(1, 1, 128, 128), float32], %v_param_14: Tensor[(128), float32], %v_param_15: Tensor[(3, 3, 128, 1), float32], %v_param_16: Tensor[(128), float32], %v_param_17: Tensor[(1, 1, 128, 256), float32], %v_param_18: Tensor[(256), float32], %v_param_19: Tensor[(3, 3, 256, 1), float32], %v_param_20: Tensor[(256), float32], %v_param_21: Tensor[(1, 1, 256, 256), float32], %v_param_22: Tensor[(256), float32], %v_param_23: Tensor[(3, 3, 256, 1), float32], %v_param_24: Tensor[(256), float32], %v_param_25: Tensor[(1, 1, 256, 512), float32], %v_param_26: Tensor[(512), float32], %v_param_27: Tensor[(3, 3, 512, 1), float32], %v_param_28: Tensor[(512), float32], %v_param_29: Tensor[(1, 1, 512, 512), float32], %v_param_30: Tensor[(512), float32], %v_param_31: Tensor[(3, 3, 512, 1), float32], %v_param_32: Tensor[(512), float32], %v_param_33: Tensor[(1, 1, 512, 512), float32], %v_param_34: Tensor[(512), float32], %v_param_35: Tensor[(3, 3, 512, 1), float32], %v_param_36: Tensor[(512), float32], %v_param_37: Tensor[(1, 1, 512, 512), float32], %v_param_38: Tensor[(512), float32], %v_param_39: Tensor[(3, 3, 512, 1), float32], %v_param_40: Tensor[(512), float32], %v_param_41: Tensor[(1, 1, 512, 512), float32], %v_param_42: Tensor[(512), float32], %v_param_43: Tensor[(3, 3, 512, 1), float32], %v_param_44: Tensor[(512), float32], %v_param_45: Tensor[(1, 1, 512, 512), float32], %v_param_46: Tensor[(512), float32], %v_param_47: Tensor[(3, 3, 512, 1), float32], %v_param_48: Tensor[(512), float32], %v_param_49: Tensor[(1, 1, 512, 1024), float32], %v_param_50: Tensor[(1024), float32], %v_param_51: Tensor[(3, 3, 1024, 1), float32], %v_param_52: Tensor[(1024), float32], %v_param_53: Tensor[(1, 1, 1024, 1024), float32], %v_param_54: Tensor[(1024), float32], %v_param_55: Tensor[(1, 1, 1024, 1001), float32], %v_param_56: Tensor[(1001), float32]) {
%0 = nn.pad(%input, pad_width=[[0, 0], [0, 1], [0, 1], [0, 0]]);
%1 = nn.conv2d(%0, %v_param_1, strides=[2, 2], channels=32, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") in particular dimension 2 conflicts 224 does not match 3; unable to unify: Tensor[(3, 3, 224, 32), float32] and Tensor[(3, 3, 3, 32), float32]; ;
%2 = nn.bias_add(%1, %v_param_2, axis=3);
%3 = clip(%2, a_min=0f, a_max=6f);
%4 = nn.pad(%3, pad_width=[[0, 0], [1, 1], [1, 1], [0, 0]]);
%5 = nn.conv2d(%4, %v_param_3, groups=32, channels=32, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI");
%6 = nn.bias_add(%5, %v_param_4, axis=3);
%7 = clip(%6, a_min=0f, a_max=6f);
%8 = nn.conv2d(%7, %v_param_5, channels=64, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%9 = nn.bias_add(%8, %v_param_6, axis=3);
%10 = clip(%9, a_min=0f, a_max=6f);
%11 = nn.pad(%10, pad_width=[[0, 0], [0, 1], [0, 1], [0, 0]]);
%12 = nn.conv2d(%11, %v_param_7, strides=[2, 2], groups=64, channels=64, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI");
%13 = nn.bias_add(%12, %v_param_8, axis=3);
%14 = clip(%13, a_min=0f, a_max=6f);
%15 = nn.conv2d(%14, %v_param_9, channels=128, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%16 = nn.bias_add(%15, %v_param_10, axis=3);
%17 = clip(%16, a_min=0f, a_max=6f);
%18 = nn.pad(%17, pad_width=[[0, 0], [1, 1], [1, 1], [0, 0]]);
%19 = nn.conv2d(%18, %v_param_11, groups=128, channels=128, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI");
%20 = nn.bias_add(%19, %v_param_12, axis=3);
%21 = clip(%20, a_min=0f, a_max=6f);
%22 = nn.conv2d(%21, %v_param_13, channels=128, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%23 = nn.bias_add(%22, %v_param_14, axis=3);
%24 = clip(%23, a_min=0f, a_max=6f);
%25 = nn.pad(%24, pad_width=[[0, 0], [0, 1], [0, 1], [0, 0]]);
%26 = nn.conv2d(%25, %v_param_15, strides=[2, 2], groups=128, channels=128, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI");
%27 = nn.bias_add(%26, %v_param_16, axis=3);
%28 = clip(%27, a_min=0f, a_max=6f);
%29 = nn.conv2d(%28, %v_param_17, channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%30 = nn.bias_add(%29, %v_param_18, axis=3);
%31 = clip(%30, a_min=0f, a_max=6f);
%32 = nn.pad(%31, pad_width=[[0, 0], [1, 1], [1, 1], [0, 0]]);
%33 = nn.conv2d(%32, %v_param_19, groups=256, channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI");
%34 = nn.bias_add(%33, %v_param_20, axis=3);
%35 = clip(%34, a_min=0f, a_max=6f);
%36 = nn.conv2d(%35, %v_param_21, channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%37 = nn.bias_add(%36, %v_param_22, axis=3);
%38 = clip(%37, a_min=0f, a_max=6f);
%39 = nn.pad(%38, pad_width=[[0, 0], [0, 1], [0, 1], [0, 0]]);
%40 = nn.conv2d(%39, %v_param_23, strides=[2, 2], groups=256, channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI");
%41 = nn.bias_add(%40, %v_param_24, axis=3);
%42 = clip(%41, a_min=0f, a_max=6f);
%43 = nn.conv2d(%42, %v_param_25, channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%44 = nn.bias_add(%43, %v_param_26, axis=3);
%45 = clip(%44, a_min=0f, a_max=6f);
%46 = nn.pad(%45, pad_width=[[0, 0], [1, 1], [1, 1], [0, 0]]);
%47 = nn.conv2d(%46, %v_param_27, groups=512, channels=512, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI");
%48 = nn.bias_add(%47, %v_param_28, axis=3);
%49 = clip(%48, a_min=0f, a_max=6f);
%50 = nn.conv2d(%49, %v_param_29, channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%51 = nn.bias_add(%50, %v_param_30, axis=3);
%52 = clip(%51, a_min=0f, a_max=6f);
%53 = nn.pad(%52, pad_width=[[0, 0], [1, 1], [1, 1], [0, 0]]);
%54 = nn.conv2d(%53, %v_param_31, groups=512, channels=512, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI");
%55 = nn.bias_add(%54, %v_param_32, axis=3);
%56 = clip(%55, a_min=0f, a_max=6f);
%57 = nn.conv2d(%56, %v_param_33, channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%58 = nn.bias_add(%57, %v_param_34, axis=3);
%59 = clip(%58, a_min=0f, a_max=6f);
%60 = nn.pad(%59, pad_width=[[0, 0], [1, 1], [1, 1], [0, 0]]);
%61 = nn.conv2d(%60, %v_param_35, groups=512, channels=512, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI");
%62 = nn.bias_add(%61, %v_param_36, axis=3);
%63 = clip(%62, a_min=0f, a_max=6f);
%64 = nn.conv2d(%63, %v_param_37, channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%65 = nn.bias_add(%64, %v_param_38, axis=3);
%66 = clip(%65, a_min=0f, a_max=6f);
%67 = nn.pad(%66, pad_width=[[0, 0], [1, 1], [1, 1], [0, 0]]);
%68 = nn.conv2d(%67, %v_param_39, groups=512, channels=512, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI");
%69 = nn.bias_add(%68, %v_param_40, axis=3);
%70 = clip(%69, a_min=0f, a_max=6f);
%71 = nn.conv2d(%70, %v_param_41, channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%72 = nn.bias_add(%71, %v_param_42, axis=3);
%73 = clip(%72, a_min=0f, a_max=6f);
%74 = nn.pad(%73, pad_width=[[0, 0], [1, 1], [1, 1], [0, 0]]);
%75 = nn.conv2d(%74, %v_param_43, groups=512, channels=512, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI");
%76 = nn.bias_add(%75, %v_param_44, axis=3);
%77 = clip(%76, a_min=0f, a_max=6f);
%78 = nn.conv2d(%77, %v_param_45, channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%79 = nn.bias_add(%78, %v_param_46, axis=3);
%80 = clip(%79, a_min=0f, a_max=6f);
%81 = nn.pad(%80, pad_width=[[0, 0], [0, 1], [0, 1], [0, 0]]);
%82 = nn.conv2d(%81, %v_param_47, strides=[2, 2], groups=512, channels=512, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI");
%83 = nn.bias_add(%82, %v_param_48, axis=3);
%84 = clip(%83, a_min=0f, a_max=6f);
%85 = nn.conv2d(%84, %v_param_49, channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%86 = nn.bias_add(%85, %v_param_50, axis=3);
%87 = clip(%86, a_min=0f, a_max=6f);
%88 = nn.pad(%87, pad_width=[[0, 0], [1, 1], [1, 1], [0, 0]]);
%89 = nn.conv2d(%88, %v_param_51, groups=1024, channels=1024, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI");
%90 = nn.bias_add(%89, %v_param_52, axis=3);
%91 = clip(%90, a_min=0f, a_max=6f);
%92 = nn.conv2d(%91, %v_param_53, channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%93 = nn.bias_add(%92, %v_param_54, axis=3);
%94 = clip(%93, a_min=0f, a_max=6f);
%95 = nn.avg_pool2d(%94, pool_size=[7, 7], strides=[2, 2], layout="NHWC");
%96 = nn.conv2d(%95, %v_param_55, channels=1001, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO");
%97 = nn.bias_add(%96, %v_param_56, axis=3);
%98 = reshape(%97, newshape=[1, 1001]);
nn.softmax(%98, axis=1)
}
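The annotation on %1 shows the root cause: with data_layout="NHWC", type inference reads the NCHW-shaped input (1, 3, 224, 224) as N=1, H=3, W=224, C=224, so it expects an HWIO kernel of shape (3, 3, 224, 32) and cannot unify that with the model's actual (3, 3, 3, 32) weights. TFLite models store activations in NHWC and the TVM frontend imports them in that layout, so the practical fix is the one described above: pass an NHWC shape_dict and feed NHWC data. Below is a hedged sketch of the data-side fix, assuming image_data is produced channels-first by the preprocessing pipeline (the array here is a stand-in):

```python
import numpy as np

# Stand-in for the real preprocessed image; assume it comes out
# channels-first as (1, 3, 224, 224).
image_data_nchw = np.random.rand(1, 3, 224, 224).astype("float32")

# Transpose to NHWC (1, 224, 224, 3) before feeding it to the module
# built from the NHWC import.
image_data = np.transpose(image_data_nchw, (0, 2, 3, 1))

# Later TVM releases also provide a relay.transform.ConvertLayout pass
# that rewrites the imported NHWC graph to NCHW after import; its exact
# signature varies by release, so check the docs for your version.
```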
