After exporting the ONNX model, the following error occurs at inference time:
Non-zero status code returned while running Add node. Name:'/model/decoder/Add' Status Message: /onnxruntime_src/onnxruntime/core/providers/cpu/math/element_wise_ops.h:560 void onnxruntime::BroadcastIterator::Append(ptrdiff_t, ptrdiff_t) axis == 1 || axis == largest was false. Attempting to broadcast an axis by a dimension other than 1. 6400 by 24960
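A minimal sketch of how the exported model is run with ONNX Runtime (assumptions: the apgcc_vgg.onnx produced by the script below, CPU execution, and a 640x640 input chosen only as an example of a resolution different from the 320x320 export dummy, which is what I suspect triggers the broadcast mismatch):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("apgcc_vgg.onnx", providers=["CPUExecutionProvider"])
input_name = sess.get_inputs()[0].name  # "image"

# Same resolution as the 320x320 export dummy.
out_320 = sess.run(None, {input_name: np.zeros((1, 3, 320, 320), dtype=np.float32)})[0]

# A different resolution, exercising the dynamic height/width axes declared at export;
# inputs like this are where the '/model/decoder/Add' error above appears.
out_640 = sess.run(None, {input_name: np.zeros((1, 3, 640, 640), dtype=np.float32)})[0]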
The export code is as follows:
import os  # needed for os.environ and os.path.join below
import numpy as np
import torch
import argparse
import random
from engine import *
from datasets import build_dataset
from models import build_model
import util.misc as utils
from util.logger import setup_logger, AvgerageMeter, EvaluateMeter
import torch.nn as nn


def parse_args():
    from config import cfg, merge_from_file, merge_from_list
    parser = argparse.ArgumentParser('APGCC')
    parser.add_argument('-c', '--config_file', type=str, default="", help='the path to the training config')
    parser.add_argument('-t', '--test', action='store_true', default=False, help='Model test')
    parser.add_argument('opts', help='overwriting the training config '
                        'from the command line', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    if args.config_file != "":
        cfg = merge_from_file(cfg, args.config_file)
    cfg = merge_from_list(cfg, args.opts)
    cfg.config_file = args.config_file
    cfg.test = args.test
    return cfg


class MergeOut(nn.Module):
    """Wrap the model so points and scores come out as a single (N, 3) tensor."""

    def __init__(self, model):
        super().__init__()
        model.eval()
        device = torch.device('cpu')
        self.model = model.to(device)

    def forward(self, x):
        outputs = self.model(x)
        # Class-1 (point) confidence for each proposal.
        outputs_scores = torch.nn.functional.softmax(outputs['pred_logits'], -1)[:, :, 1][0]
        outputs_points = outputs['pred_points'][0]
        outputs_scores_expand = outputs_scores.view(-1, 1)
        # Concatenate the (x, y) coordinates with the score column.
        result = torch.cat((outputs_points, outputs_scores_expand), dim=1)
        return result


def save_onnx(model_path, save_folder):
    cfg = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = '-1'

    seed = cfg.SEED
    if seed is not None:
        g = torch.Generator()
        g.manual_seed(seed)
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.enabled = False
        torch.use_deterministic_algorithms(True, warn_only=True)
        os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':16:8'
        os.environ['PYTHONHASHSEED'] = str(seed)

    # Build the model and load only the pretrained weights that match its state dict.
    model = build_model(cfg=cfg, training=False)
    pretrained_dict = torch.load(model_path, map_location='cpu')
    model_dict = model.state_dict()
    param_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict.keys()}
    model_dict.update(param_dict)
    model.load_state_dict(model_dict)

    merge_model = MergeOut(model)
    dummy = torch.zeros(1, 3, 320, 320)
    onnx_save_path = os.path.join(save_folder, "apgcc_vgg.onnx")
    torch.onnx.export(
        merge_model,
        (dummy,),
        onnx_save_path,
        verbose=True,
        input_names=["image"],
        output_names=["output"],
        opset_version=11,
        dynamic_axes={
            "image": {0: "batch", 2: "height", 3: "width"},
            "output": {0: "point_num"},
        },
    )
    print("Done.!")


if __name__ == "__main__":
    model_path = '/mnt/e/Crowd_Dataset/SHHA_output/best.pth'
    save_folder = '/mnt/e/Crowd_Dataset/SHHA_output'
    save_onnx(model_path, save_folder)
Why does the export succeed while inference fails? TAT
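For completeness, this is the kind of sanity check I would use to confirm the exported graph matches the PyTorch model at the export resolution (a sketch only; it assumes merge_model and onnx_save_path from the script above are still in scope and that onnxruntime is installed):

import numpy as np
import onnxruntime as ort

x = torch.randn(1, 3, 320, 320)
with torch.no_grad():
    torch_out = merge_model(x).numpy()

sess = ort.InferenceSession(onnx_save_path, providers=["CPUExecutionProvider"])
onnx_out = sess.run(None, {"image": x.numpy()})[0]

# At the export resolution the two outputs should agree closely; the puzzle is
# why a different input resolution breaks the decoder Add node.
print(np.abs(torch_out - onnx_out).max())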