AssertionError: Only supports last dimension in target shape being equal to the channel number of input tensor

Hello,

I’m trying to load the generator model of a DCGAN into TVM. It takes inputs of shape (batch_size, noise_dim), e.g. (1, 100). When I call relay.frontend.from_keras(model, shape_dict), I get the following error:

Traceback (most recent call last):

  File "/.../tvm_dcgan.py", line 38, in <module>
    mod, params = relay.frontend.from_keras(model, shape_dict)

  File "/home/arne/.local/lib/python3.6/site-packages/tvm-0.6.dev0-py3.6-linux-x86_64.egg/tvm/relay/frontend/keras.py", line 749, in from_keras
    keras_op_to_relay(inexpr, keras_layer, keras_layer.name + ':' + str(node_idx), etab)

  File "/home/arne/.local/lib/python3.6/site-packages/tvm-0.6.dev0-py3.6-linux-x86_64.egg/tvm/relay/frontend/keras.py", line 673, in keras_op_to_relay
    outs = _convert_map[op_name](inexpr, keras_layer, etab)

  File "/home/arne/.local/lib/python3.6/site-packages/tvm-0.6.dev0-py3.6-linux-x86_64.egg/tvm/relay/frontend/keras.py", line 466, in _convert_reshape
    "Only supports last dimension in target shape being equal to " \

AssertionError: Only supports last dimension in target shape being equal to the channel number of input tensor.

I’ve searched for "AssertionError: Only supports last dimension in target shape..." but found nothing. From the traceback, the assertion is raised in _convert_reshape, so I assume it is triggered by the Reshape((7, 7, 256)) layer: its input comes from the Dense layer, whose last dimension is 7 * 7 * 256 = 12544, while the last dimension of the target shape is 256. Does anyone know what causes this error and how to avoid it?

Below is the complete program to reproduce the error.

import keras
from keras import layers
from tvm import relay


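# DCGAN generator: a Dense projection of the noise vector, reshaped to 7x7x256
# feature maps, then three Conv2DTranspose upsampling blocks ending in a 28x28x1 image.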
def get_model(noise_dim):
    model = keras.Sequential()

    model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(noise_dim,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256)

    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 14, 14, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)

    return model


batch_size = 1
noise_dim = 100
model = get_model(noise_dim=noise_dim)
input_shape = (batch_size, noise_dim)
shape_dict = {'input_1': input_shape}
mod, params = relay.frontend.from_keras(model, shape_dict)
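In case it helps, inspecting the Keras model on its own (without calling from_keras) shows the mismatch that the assertion seems to be complaining about. The layer index and the printed shapes below are my assumptions based on the Sequential order above, not something TVM reports:

model = get_model(noise_dim=100)
reshape_layer = model.layers[3]            # the Reshape((7, 7, 256)) layer; 0-2 are Dense, BatchNormalization, LeakyReLU
print(reshape_layer.input_shape)           # (None, 12544) -- the Dense layer outputs 7 * 7 * 256 = 12544 features
print(reshape_layer.target_shape)          # (7, 7, 256) -- last dimension 256, which is not 12544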