import tvm
from tvm import relay
# Minimal reproduction: a quantized NHWC conv2d (uint8 x int8 -> int32)
# followed by an elementwise subtract of an int32 tensor, compiled for LLVM
# at opt_level=3.
data = relay.var('p0', shape=(1, 7, 7, 832), dtype='uint8')
weight = relay.var('p1', shape=(1, 1, 832, 384), dtype='int8')
bias = relay.var('p2', shape=(1, 7, 7, 384), dtype='int32')

# 1x1 convolution accumulating into int32, then subtract the int32 operand.
conv_out = relay.nn.conv2d(
    data, weight, kernel_size=(1, 1),
    data_layout='NHWC', kernel_layout='HWIO', out_dtype='int32')
result = relay.subtract(conv_out, bias)

func = relay.Function([data, weight, bias], result)
mod = relay.transform.InferType()(relay.Module.from_expr(func))
print(mod)

# The failure reproduces during compilation, not during type inference.
with relay.build_config(opt_level=3):
    graph, lib, params = relay.build(mod, 'llvm')
Running the above script fails during `relay.build` with:
TVMError: Check failed: false: Incompatible broadcast dims: 832 and 384 in: [1, 7, 7, 832] and [1, 7, 7, 384]
However, the `print(mod)` output above shows a well-formed, correctly typed expression:
v0.0.3
def @main(%p0: Tensor[(1, 7, 7, 832), uint8], %p1: Tensor[(1, 1, 832, 384), int8], %p2: Tensor[(1, 7, 7, 384), int32]) -> Tensor[(1, 7, 7, 384), int32] {
%0 = nn.conv2d(%p0, %p1, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO", out_dtype="int32") /* ty=Tensor[(1, 7, 7, 384), int32] */;
subtract(%0, %p2) /* ty=Tensor[(1, 7, 7, 384), int32] */
}