Add new ops for relay.frontend.from_onnx (onnx.py)

I noticed that Relay already has operators like proposal and roi_pool. I have a Faster R-CNN model in ONNX format and want to compile it with Relay, so I added Proposal and ROIPooling converters to onnx.py.
The converters just call the existing ops.vision.proposal and ops.vision.roi_pool interfaces, but an error occurs when I call relay.frontend.from_onnx.
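For reference, here is roughly what I added, following the existing OnnxOpConverter pattern in onnx.py (a minimal sketch; the attribute names and default values below are what my custom ONNX nodes happen to carry and may not match other models):

```python
# Sketch of the two converters added inside python/tvm/relay/frontend/onnx.py,
# using the `_op` and `OnnxOpConverter` names already imported in that file.
# Attribute names and defaults are assumptions based on my model.

class Proposal(OnnxOpConverter):
    """Converter for a custom Proposal node -> relay.op.vision.proposal."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # inputs: [rpn_cls_prob, rpn_bbox_pred, im_info]
        return _op.vision.proposal(
            inputs[0], inputs[1], inputs[2],
            scales=attr.get('scales', (8.0, 16.0, 32.0)),
            ratios=attr.get('ratios', (0.5, 1.0, 2.0)),
            feature_stride=attr.get('feature_stride', 16),
            threshold=attr.get('threshold', 0.7),
            rpn_pre_nms_top_n=attr.get('rpn_pre_nms_top_n', 6000),
            rpn_post_nms_top_n=attr.get('rpn_post_nms_top_n', 300),
            rpn_min_size=attr.get('rpn_min_size', 16),
            iou_loss=False)


class ROIPooling(OnnxOpConverter):
    """Converter for a custom ROIPooling node -> relay.op.vision.roi_pool."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # inputs: [feature_map (NCHW), rois of shape (num_roi, 5)]
        return _op.vision.roi_pool(
            inputs[0], inputs[1],
            pooled_size=attr.get('pooled_size', (7, 7)),
            spatial_scale=attr.get('spatial_scale', 1.0 / 16),
            layout='NCHW')

# ...and registered in _get_convert_map():
#     'Proposal': Proposal.get_converter(opset),
#     'ROIPooling': ROIPooling.get_converter(opset),
```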
The error output:
Segmentation fault: 11

Stack trace:
[bt] (0) /home/hua/anaconda3/lib/python3.7/site-packages/mxnet/libmxnet.so(+0x2b64150) [0x7f2af0037150]
[bt] (1) /lib/x86_64-linux-gnu/libc.so.6(+0x3ef20) [0x7f2b2fac5f20]
[bt] (2) /home/hua/Downloads/tvm/build/libtvm.so(tvm::relay::ROIPoolRel(tvm::Array<tvm::relay::Type, void> const&, int, tvm::Attrs const&, tvm::relay::TypeReporter const&)+0x232) [0x7f2b144e0f32]
[bt] (3) /home/hua/Downloads/tvm/build/libtvm.so(std::_Function_handler<void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), tvm::runtime::TypedPackedFunc<bool (tvm::Array<tvm::relay::Type, void> const&, int, tvm::Attrs const&, tvm::relay::TypeReporter const&)>::AssignTypedLambda<bool ( )(tvm::Array<tvm::relay::Type, void> const&, int, tvm::Attrs const&, tvm::relay::TypeReporter const&)>(bool ( )(tvm::Array<tvm::relay::Type, void> const&, int, tvm::Attrs const&, tvm::relay::TypeReporter const&))::{lambda(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*)#1}>::_M_invoke(std::_Any_data const&, tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&)+0xd6) [0x7f2b143bd276]
[bt] (4) /home/hua/Downloads/tvm/build/libtvm.so(tvm::relay::TypeSolver::Solve()+0x107c) [0x7f2b145c34bc]
[bt] (5) /home/hua/Downloads/tvm/build/libtvm.so(tvm::relay::TypeInferencer::Infer(tvm::relay::Expr)+0x55) [0x7f2b145ac775]
[bt] (6) /home/hua/Downloads/tvm/build/libtvm.so(tvm::relay::InferType(tvm::relay::Function const&, tvm::relay::Module const&, tvm::relay::GlobalVar const&)+0x39a) [0x7f2b145ad5da]
[bt] (7) /home/hua/Downloads/tvm/build/libtvm.so(tvm::relay::ModuleNode::Add(tvm::relay::GlobalVar const&, tvm::relay::Function const&, bool)+0x826) [0x7f2b1437c1d6]
[bt] (8) /home/hua/Downloads/tvm/build/libtvm.so(tvm::relay::ModuleNode::FromExpr(tvm::relay::Expr const&, tvm::Map<tvm::relay::GlobalVar, tvm::relay::Function, void, void> const&, tvm::Map<tvm::relay::GlobalTypeVar, tvm::relay::TypeData, void, void> const&)+0x185) [0x7f2b1437cf95]

I don't know how to fix this problem. Could anyone give me some advice?

The error comes when executing "return _module.Module.from_expr(func), self._params" (from_expr triggers type inference, which matches the TypeSolver::Solve and ROIPoolRel frames in the stack trace above).
I also printed func. It shows:
%0 = nn.conv2d(%data_input, %conv1_W, strides=[2, 2], padding=[3, 3], dilation=[1, 1, 1], kernel_size=[7, 7]);
%1 = nn.batch_norm(%0, %scale_conv1_scale, %scale_conv1_b, %bn_conv1_mean, %bn_conv1_var, epsilon=1e-05f);
%2 = %1.0;
%3 = nn.relu(%2);
%4 = nn.max_pool2d(%3, pool_size=[3, 3], strides=[2, 2]);
%5 = nn.conv2d(%4, %res2a_branch1_W, dilation=[1, 1, 1], kernel_size=[1, 1]);
%6 = nn.batch_norm(%5, %scale2a_branch1_scale, %scale2a_branch1_b, %bn2a_branch1_mean, %bn2a_branch1_var, epsilon=1e-05f);
%7 = %6.0;
%8 = nn.conv2d(%4, %res2a_branch2a_W, padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%9 = nn.batch_norm(%8, %scale2a_branch2a_scale, %scale2a_branch2a_b, %bn2a_branch2a_mean, %bn2a_branch2a_var, epsilon=1e-05f);
%10 = %9.0;
%11 = nn.relu(%10);
%12 = nn.conv2d(%11, %res2a_branch2b_W, padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%13 = nn.batch_norm(%12, %scale2a_branch2b_scale, %scale2a_branch2b_b, %bn2a_branch2b_mean, %bn2a_branch2b_var, epsilon=1e-05f);
%14 = %13.0;
%15 = add(%7, %14);
%16 = nn.relu(%15);
%17 = nn.conv2d(%16, %res2b_branch2a_W, padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%18 = nn.batch_norm(%17, %scale2b_branch2a_scale, %scale2b_branch2a_b, %bn2b_branch2a_mean, %bn2b_branch2a_var, epsilon=1e-05f);
%19 = %18.0;
%20 = nn.relu(%19);
%21 = nn.conv2d(%20, %res2b_branch2b_W, padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%22 = nn.batch_norm(%21, %scale2b_branch2b_scale, %scale2b_branch2b_b, %bn2b_branch2b_mean, %bn2b_branch2b_var, epsilon=1e-05f);
%23 = %22.0;
%24 = add(%16, %23);
%25 = nn.relu(%24);
%26 = nn.conv2d(%25, %res3a_branch1_W, strides=[2, 2], dilation=[1, 1, 1], kernel_size=[1, 1]);
%27 = nn.batch_norm(%26, %scale3a_branch1_scale, %scale3a_branch1_b, %bn3a_branch1_mean, %bn3a_branch1_var, epsilon=1e-05f);
%28 = %27.0;
%29 = nn.conv2d(%25, %res3a_branch2a_W, strides=[2, 2], padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%30 = nn.batch_norm(%29, %scale3a_branch2a_scale, %scale3a_branch2a_b, %bn3a_branch2a_mean, %bn3a_branch2a_var, epsilon=1e-05f);
%31 = %30.0;
%32 = nn.relu(%31);
%33 = nn.conv2d(%32, %res3a_branch2b_W, padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%34 = nn.batch_norm(%33, %scale3a_branch2b_scale, %scale3a_branch2b_b, %bn3a_branch2b_mean, %bn3a_branch2b_var, epsilon=1e-05f);
%35 = %34.0;
%36 = add(%28, %35);
%37 = nn.relu(%36);
%38 = nn.conv2d(%37, %res3b_branch2a_W, padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%39 = nn.batch_norm(%38, %scale3b_branch2a_scale, %scale3b_branch2a_b, %bn3b_branch2a_mean, %bn3b_branch2a_var, epsilon=1e-05f);
%40 = %39.0;
%41 = nn.relu(%40);
%42 = nn.conv2d(%41, %res3b_branch2b_W, padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%43 = nn.batch_norm(%42, %scale3b_branch2b_scale, %scale3b_branch2b_b, %bn3b_branch2b_mean, %bn3b_branch2b_var, epsilon=1e-05f);
%44 = %43.0;
%45 = add(%37, %44);
%46 = nn.relu(%45);
%47 = nn.conv2d(%46, %res4a_branch1_W, strides=[2, 2], dilation=[1, 1, 1], kernel_size=[1, 1]);
%48 = nn.batch_norm(%47, %scale4a_branch1_scale, %scale4a_branch1_b, %bn4a_branch1_mean, %bn4a_branch1_var, epsilon=1e-05f);
%49 = %48.0;
%50 = nn.conv2d(%46, %res4a_branch2a_W, strides=[2, 2], padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%51 = nn.batch_norm(%50, %scale4a_branch2a_scale, %scale4a_branch2a_b, %bn4a_branch2a_mean, %bn4a_branch2a_var, epsilon=1e-05f);
%52 = %51.0;
%53 = nn.relu(%52);
%54 = nn.conv2d(%53, %res4a_branch2b_W, padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%55 = nn.batch_norm(%54, %scale4a_branch2b_scale, %scale4a_branch2b_b, %bn4a_branch2b_mean, %bn4a_branch2b_var, epsilon=1e-05f);
%56 = %55.0;
%57 = add(%49, %56);
%58 = nn.relu(%57);
%59 = nn.conv2d(%58, %res4b_branch2a_W, padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%60 = nn.batch_norm(%59, %scale4b_branch2a_scale, %scale4b_branch2a_b, %bn4b_branch2a_mean, %bn4b_branch2a_var, epsilon=1e-05f);
%61 = %60.0;
%62 = nn.relu(%61);
%63 = nn.conv2d(%62, %res4b_branch2b_W, padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%64 = nn.batch_norm(%63, %scale4b_branch2b_scale, %scale4b_branch2b_b, %bn4b_branch2b_mean, %bn4b_branch2b_var, epsilon=1e-05f);
%65 = %64.0;
%66 = add(%58, %65);
%67 = nn.relu(%66);
%68 = nn.conv2d(%67, %res5a_branch1_W, dilation=[1, 1, 1], kernel_size=[1, 1]);
%69 = nn.batch_norm(%68, %scale5a_branch1_scale, %scale5a_branch1_b, %bn5a_branch1_mean, %bn5a_branch1_var, epsilon=1e-05f);
%70 = %69.0;
%71 = nn.conv2d(%67, %res5a_branch2a_W, padding=[2, 2], dilation=[1, 1, 1], kernel_size=[3, 3]);
%72 = nn.batch_norm(%71, %scale5a_branch2a_scale, %scale5a_branch2a_b, %bn5a_branch2a_mean, %bn5a_branch2a_var, epsilon=1e-05f);
%73 = %72.0;
%74 = nn.relu(%73);
%75 = nn.conv2d(%74, %res5a_branch2b_W, padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%76 = nn.batch_norm(%75, %scale5a_branch2b_scale, %scale5a_branch2b_b, %bn5a_branch2b_mean, %bn5a_branch2b_var, epsilon=1e-05f);
%77 = %76.0;
%78 = add(%70, %77);
%79 = nn.relu(%78);
%80 = nn.conv2d(%79, %res5b_branch2a_W, padding=[2, 2], dilation=[1, 1, 1], kernel_size=[3, 3]);
%81 = nn.batch_norm(%80, %scale5b_branch2a_scale, %scale5b_branch2a_b, %bn5b_branch2a_mean, %bn5b_branch2a_var, epsilon=1e-05f);
%82 = %81.0;
%83 = nn.relu(%82);
%84 = nn.conv2d(%83, %res5b_branch2b_W, padding=[2, 2], dilation=[1, 1, 1], kernel_size=[3, 3]);
%85 = nn.batch_norm(%84, %scale5b_branch2b_scale, %scale5b_branch2b_b, %bn5b_branch2b_mean, %bn5b_branch2b_var, epsilon=1e-05f);
%86 = %85.0;
%87 = add(%79, %86);
%88 = nn.relu(%87);
%89 = nn.conv2d(%88, %conv_ld1_1_W, dilation=[1, 1, 1], kernel_size=[1, 1]);
%90 = nn.bias_add(%89, %conv_ld1_1_b);
%91 = nn.conv2d(%90, %conv_ld1_2_W, dilation=[1, 1, 1], kernel_size=[1, 1]);
%92 = nn.bias_add(%91, %conv_ld1_2_b);
%93 = nn.conv2d(%67, %rpn_conv/3x3_W, padding=[1, 1], dilation=[1, 1, 1], kernel_size=[3, 3]);
%94 = nn.bias_add(%93, %rpn_conv/3x3_b);
%95 = nn.relu(%94);
%96 = nn.conv2d(%95, %rpn_cls_score_W, dilation=[1, 1, 1], kernel_size=[1, 1]);
%97 = nn.bias_add(%96, %rpn_cls_score_b);
%98 = reshape(%97, newshape=[1, 2, 270, 40]);
%99 = nn.softmax(%98, axis=1);
%100 = reshape(%99, newshape=[1, 18, 30, 40]);
%101 = nn.conv2d(%95, %rpn_bbox_pred_W, dilation=[1, 1, 1], kernel_size=[1, 1]);
%102 = nn.bias_add(%101, %rpn_bbox_pred_b);
%103 = vision.proposal(%100, %102, %im_info_input, meta[relay.attrs.ProposalAttrs][0]);
%104 = vision.roi_pool(%92, %103, meta[relay.attrs.ROIPoolAttrs][0]);
%105 = reshape(%104, newshape=[49, 490]);
%106 = nn.batch_flatten(%105);
%107 = multiply(1f, %106);
%108 = transpose(%cls_score_new_W, axes=[1, 0]);
%109 = nn.dense(%107, %108, units=490);
%110 = multiply(1f, %cls_score_new_B);
%111 = nn.bias_add(%109, %110);
%112 = nn.softmax(%111, axis=1);
%113 = reshape(%112, newshape=[49, 7]);
%114 = reshape(%104, newshape=[49, 490]);
%115 = nn.batch_flatten(%114);
%116 = multiply(1f, %115);
%117 = transpose(%bbox_pred_new_W, axes=[1, 0]);
%118 = nn.dense(%116, %117, units=490);
%119 = multiply(1f, %bbox_pred_new_B);
%120 = nn.bias_add(%118, %119);
%121 = reshape(%120, newshape=[49, 28]);
(%113, %121)
}

Maybe the shapes of your ONNX model's inputs/outputs don't match the input/output shapes the ROI op expects, because all of these ops are based on MXNet (the Relay proposal and roi_pool ops follow MXNet's conventions).
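For example, vision.roi_pool expects the feature map as a 4-D NCHW tensor and the ROIs as a 2-D tensor of shape (num_roi, 5) in the MXNet style (batch_index, x1, y1, x2, y2). You can type-check the op in isolation to see whether your shapes are accepted; a minimal sketch (the shapes, pooled_size, and spatial_scale below are just placeholders, substitute what your graph actually feeds into roi_pool):

```python
import tvm
from tvm import relay

# Stand-alone type check of vision.roi_pool with MXNet-style shapes:
# data is NCHW, rois is (num_roi, 5). Replace the placeholder shapes with
# the ones your model actually produces before vision.roi_pool.
data = relay.var("data", shape=(1, 10, 30, 40), dtype="float32")
rois = relay.var("rois", shape=(49, 5), dtype="float32")
out = relay.vision.roi_pool(data, rois, pooled_size=(7, 7),
                            spatial_scale=1.0 / 16, layout="NCHW")
func = relay.Function([data, rois], out)
mod = relay.Module.from_expr(func)
mod = relay.transform.InferType()(mod)
print(mod["main"])  # inferred output type should be (49, 10, 7, 7) here
```

If the rois tensor in your converted graph is not rank-2 with 5 columns, type inference for roi_pool will not work as expected.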

What do you mean by saying all the ops are based on MXNet?