When running the fold_scale_axis pass: Check failed: op->is_scalar()

When running the fold_scale_axis pass, if in_scale is a relay.const, I hit the following error:

    tvm._ffi.base.TVMError: Traceback (most recent call last):
    [bt] (8) /home/yin/.local/lib/python3.5/site-packages/tvm-0.6.dev0-py3.5-linux-x86_64.egg/tvm/libtvm.so(tvm::relay::ScheduleGetter::VisitExpr_(tvm::relay::CallNode const*)+0x24e) [0x7f9ed02e51b8]
    [bt] (7) /home/yin/.local/lib/python3.5/site-packages/tvm-0.6.dev0-py3.5-linux-x86_64.egg/tvm/libtvm.so(tvm::relay::ScheduleGetter::VisitExpr(tvm::relay::Expr const&)+0x98) [0x7f9ed02e46e2]
    [bt] (6) /home/yin/.local/lib/python3.5/site-packages/tvm-0.6.dev0-py3.5-linux-x86_64.egg/tvm/libtvm.so(tvm::relay::ExprFunctor<tvm::Array<tvm::Tensor, void> (tvm::relay::Expr const&)>::VisitExpr(tvm::relay::Expr const&)+0x12d) [0x7f9ed02ec35b]
    [bt] (5) /home/yin/.local/lib/python3.5/site-packages/tvm-0.6.dev0-py3.5-linux-x86_64.egg/tvm/libtvm.so(tvm::IRFunctor<tvm::Array<tvm::Tensor, void> (tvm::NodeRef const&, tvm::relay::ExprFunctor<tvm::Array<tvm::Tensor, void> (tvm::relay::Expr const&)>)>::operator()(tvm::NodeRef const&, tvm::relay::ExprFunctor<tvm::Array<tvm::Tensor, void> (tvm::relay::Expr const&)>) const+0x16d) [0x7f9ed02ee9fb]
    [bt] (4) /home/yin/.local/lib/python3.5/site-packages/tvm-0.6.dev0-py3.5-linux-x86_64.egg/tvm/libtvm.so(std::function<tvm::Array<tvm::Tensor, void> (tvm::NodeRef const&, tvm::relay::ExprFunctor<tvm::Array<tvm::Tensor, void> (tvm::relay::Expr const&)>)>::operator()(tvm::NodeRef const&, tvm::relay::ExprFunctor<tvm::Array<tvm::Tensor, void> (tvm::relay::Expr const&)>) const+0x66) [0x7f9ed02f35ba]
    [bt] (3) /home/yin/.local/lib/python3.5/site-packages/tvm-0.6.dev0-py3.5-linux-x86_64.egg/tvm/libtvm.so(std::_Function_handler<tvm::Array<tvm::Tensor, void> (tvm::NodeRef const&, tvm::relay::ExprFunctor<tvm::Array<tvm::Tensor, void> (tvm::relay::Expr const&)>), tvm::relay::ExprFunctor<tvm::Array<tvm::Tensor, void> (tvm::relay::Expr const&)>::InitVTable()::{lambda(tvm::NodeRef const&, tvm::relay::ExprFunctor<tvm::Array<tvm::Tensor, void> (tvm::relay::Expr const&)>)#1}>::_M_invoke(std::_Any_data const&, tvm::NodeRef const&, tvm::relay::ExprFunctor<tvm::Array<tvm::Tensor, void> (tvm::relay::Expr const&)>*)+0x5a) [0x7f9ed02f5739]
    [bt] (2) /home/yin/.local/lib/python3.5/site-packages/tvm-0.6.dev0-py3.5-linux-x86_64.egg/tvm/libtvm.so(tvm::relay::ExprFunctor<tvm::Array<tvm::Tensor, void> (tvm::relay::Expr const&)>::InitVTable()::{lambda(tvm::NodeRef const&, tvm::relay::ExprFunctor<tvm::Array<tvm::Tensor, void> (tvm::relay::Expr const&)>)#1}::operator()(tvm::NodeRef const&, tvm::relay::ExprFunctor<tvm::Array<tvm::Tensor, void> (tvm::relay::Expr const&)>) const+0x46) [0x7f9ed02edb22]
    [bt] (1) /home/yin/.local/lib/python3.5/site-packages/tvm-0.6.dev0-py3.5-linux-x86_64.egg/tvm/libtvm.so(tvm::relay::ScheduleGetter::VisitExpr_(tvm::relay::ConstantNode const*)+0xa5) [0x7f9ed02e4bbf]
    [bt] (0) /home/yin/.local/lib/python3.5/site-packages/tvm-0.6.dev0-py3.5-linux-x86_64.egg/tvm/libtvm.so(dmlc::LogMessageFatal::~LogMessageFatal()+0x25) [0x7f9ecfa79049]
    File "/home/yin/program_files/tvm/src/relay/backend/compile_engine.cc", line 179
    TVMError: Check failed: op->is_scalar():

The code is below:
    def test_fold_fwd_simple():
        """Simple testcase."""
        def before(x, conv_weight, in_bias, in_scale, channels):
            args = [x, conv_weight, in_bias]
            in_bias = relay.expand_dims(in_bias, axis=1, num_newaxis=2)
            x = relay.multiply(x, in_scale)
            x = relay.nn.relu(x)
            x = relay.add(x, in_bias)
            y = relay.nn.conv2d(x, conv_weight,
                                channels=channels,
                                kernel_size=(3, 3),
                                padding=(1, 1))
            return relay.Function(args, y)

        def expected(x, conv_weight, in_bias, in_scale, channels):
            # use a fixed order of args so alpha equal check can pass
            args = [x, conv_weight, in_bias]
            in_bias = relay.expand_dims(in_bias, axis=1, num_newaxis=2)
            squeezed_scale = relay.squeeze(in_scale, axis=[1, 2])
            x = relay.nn.relu(x)
            in_bias = relay.divide(
                in_bias, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2))
            x = relay.add(x, in_bias)
            conv_weight = relay.multiply(
                conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2))
            y = relay.nn.conv2d(x, conv_weight,
                                channels=channels,
                                kernel_size=(3, 3),
                                padding=(1, 1))
            return relay.Function(args, y)

        def check(shape, channels):
            x = relay.var("x", shape=shape)
            in_channels = shape[1]
            weight = relay.var("weight")
            in_bias = relay.var("in_bias", shape=(in_channels,))
            # in_scale = relay.var("in_scale", shape=(in_channels, 1, 1))  # compiles fine, but then the pass folds nothing
            in_scale = relay.const(_get_positive_scale((in_channels, 1, 1)))  # with a const the pass works, but compilation fails (see the sketches after the code)
            y1 = before(x, weight, in_bias, in_scale, channels)
            y1 = run_opt_pass(y1, transform.InferType())

            type_dict = {x.name_hint: x.checked_type for x in y1.params}
            weight = relay.var("weight", type_dict["weight"])
            y1_folded = run_opt_pass(y1, transform.FoldScaleAxis())
            y1_folded = run_opt_pass(y1_folded, transform.ForwardFoldScaleAxis())
            y1_expected = expected(x, weight, in_bias, in_scale, channels)

            # y1_folded = run_opt_pass(y1_folded, transform.InferType())
            # y1_folded = run_opt_pass(y1_folded, transform.FuseOps(1))
            y1_expected = run_opt_pass(y1_expected, transform.InferType())
            run_opt_pass(y1, transform.PrintIR())
            run_opt_pass(y1_folded, transform.PrintIR())
            run_opt_pass(y1_expected, transform.PrintIR())

            engine = relay.backend.compile_engine.get()
            f = engine.jit(y1_expected, "llvm")  # fails here when in_scale is a const (trace above)
            dtype = "float32"  # assuming float32 tensors
            a = tvm.nd.array(np.ones(shape=(2, 4, 10, 10)).astype(dtype), tvm.cpu(0))
            w = tvm.nd.array(np.ones(shape=(2, 4, 3, 3)).astype(dtype), tvm.cpu(0))
            b = tvm.nd.array(np.asarray([1, 1, 1, 1]).astype(dtype), tvm.cpu(0))
            in_s = tvm.nd.array(np.asarray([[[2]], [[2]], [[2]], [[2]]]).astype(dtype), tvm.cpu(0))
            o = tvm.nd.array(np.zeros(shape=(2, 2, 10, 10)).astype(dtype), tvm.cpu(0))
            f(a, w, b, in_s, o)

        check((2, 4, 10, 10), 2)
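
If I read the trace correctly, the check fires in ScheduleGetter::VisitExpr_(ConstantNode*), so it does not look specific to the fold pass: any non-scalar relay.const left inside a function that goes straight to the compile engine should trip it. A minimal sketch of what I mean (untested, just to isolate the issue from the pass):

    import numpy as np
    import tvm
    from tvm import relay

    x = relay.var("x", shape=(1, 4))
    c = relay.const(np.ones((1, 4), dtype="float32"))  # non-scalar constant
    f = relay.Function([x], relay.multiply(x, c))
    mod = relay.Module.from_expr(f)
    mod = relay.transform.InferType()(mod)
    engine = relay.backend.compile_engine.get()
    engine.jit(mod["main"], "llvm")  # I would expect this to hit the same op->is_scalar() check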
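As a possible workaround (a minimal sketch, assuming relay.build handles non-scalar constants by embedding them in the compiled module instead of lowering them directly), I plan to run the folded function through the graph runtime rather than engine.jit:

    from tvm.contrib import graph_runtime

    def run_folded(func, inputs, target="llvm"):
        # Build through relay.build instead of calling
        # compile_engine.jit on the function directly.
        mod = relay.Module.from_expr(func)
        with relay.build_config(opt_level=2):
            graph, lib, params = relay.build(mod, target=target)
        m = graph_runtime.create(graph, lib, tvm.cpu(0))
        for name, value in inputs.items():  # inputs: param name -> numpy array
            m.set_input(name, tvm.nd.array(value))
        m.set_input(**params)
        m.run()
        return m.get_output(0)

e.g. run_folded(y1_folded, {"x": np.ones((2, 4, 10, 10), "float32"), "weight": np.ones((2, 4, 3, 3), "float32"), "in_bias": np.ones((4,), "float32")}). Is that the intended way to execute a function that embeds non-scalar constants, or should engine.jit support this case?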