[FRONTEND][Tensorflow] - Reshape Error

I am facing the below error while compiling a TensorFlow model with the NNVM TensorFlow frontend.

Error:
Traceback (most recent call last):
  File "TensorflowModel.py", line 80, in <module>
    graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, dtype=dtype_dict, params=params)
  File "/home/ubuntu/.local/lib/python3.6/site-packages/nnvm-0.8.0-py3.6.egg/nnvm/compiler/build_module.py", line 270, in build
    ishape, _ = graph_util.infer_shape(graph, **shape)
  File "/home/ubuntu/.local/lib/python3.6/site-packages/nnvm-0.8.0-py3.6.egg/nnvm/compiler/graph_util.py", line 31, in infer_shape
    graph = graph.apply("InferShape")
  File "/home/ubuntu/.local/lib/python3.6/site-packages/nnvm-0.8.0-py3.6.egg/nnvm/graph.py", line 234, in apply
    check_call(_LIB.NNGraphApplyPasses(self.handle, npass, cpass, ctypes.byref(ghandle)))
  File "/home/ubuntu/.local/lib/python3.6/site-packages/nnvm-0.8.0-py3.6.egg/nnvm/_base.py", line 75, in check_call
    raise NNVMError(py_str(_LIB.NNGetLastError()))
nnvm._base.NNVMError: Error in operator Flatten/flatten/Reshape: [15:40:21] /home/ubuntu/NNVM/tvm/nnvm/src/top/tensor/transform.cc:499: Check failed: infer_idx < 0 (0 vs. 0) : One and only one dim can be inferred
Stack trace:

@srkreddy1238 @FrozenGene.
Any help here?

Thank you

Can you print more info with the patch below?

diff --git a/nnvm/python/nnvm/frontend/tensorflow.py b/nnvm/python/nnvm/frontend/tensorflow.py
index f2ff6029..4f7f8e66 100644
--- a/nnvm/python/nnvm/frontend/tensorflow.py
+++ b/nnvm/python/nnvm/frontend/tensorflow.py
@@ -416,6 +416,7 @@ def _slice():
 
 def _reshape():
     def _impl(inputs, attr, params):
+        print("Attr:", attr)
         try:
             pop_node = inputs[1]
             shape_arg = params.pop(pop_node.list_output_names()[0])

Hi @srkreddy1238.

Below is the log after the above changes:

 Attr:  {'T': tf.float32, 'Tshape': tf.int32, '_output_shapes': [[-1, 1, 1, 256]], '_node_name': '_1_0/__block/Reshape', '_target_layout': 'NHWC', '_input_shapes': {<nnvm.symbol.Symbol object at 0x7f39e1f86858>: [-1, 256], <nnvm.symbol.Symbol object at 0x7f39e1f86a68>: [4]}, '_input_0d_mismatch': set()}
Attr:  {'_output_shapes': [[-1, 1, 1, 256]], 'T': tf.float32, 'Tshape': tf.int32, '_node_name': '_1_1/__block/Reshape', '_target_layout': 'NHWC', '_input_shapes': {<nnvm.symbol.Symbol object at 0x7f39e1f09bb8>: [-1, 256], <nnvm.symbol.Symbol object at 0x7f39e1f09528>: [4]}, '_input_0d_mismatch': set()}
Attr:  {'T': tf.float32, 'Tshape': tf.int32, '_output_shapes': [[-1, 1, 1, 256]], '_node_name': '_1_2/__block/Reshape', '_target_layout': 'NHWC', '_input_shapes': {<nnvm.symbol.Symbol object at 0x7f39e1f09e88>: [-1, 256], <nnvm.symbol.Symbol object at 0x7f39e1e336a8>: [4]}, '_input_0d_mismatch': set()}
Attr:  {'T': tf.float32, 'Tshape': tf.int32, '_output_shapes': [[-1, 1, 1, 512]], '_node_name': '_2_0/__block/Reshape', '_target_layout': 'NHWC', '_input_shapes': {<nnvm.symbol.Symbol object at 0x7f39e1e33a98>: [-1, 512], <nnvm.symbol.Symbol object at 0x7f39e1e33a68>: [4]}, '_input_0d_mismatch': set()}
Attr:  {'T': tf.float32, 'Tshape': tf.int32, '_output_shapes': [[-1, 1, 1, 512]], '_node_name': '_2_1/__block/Reshape', '_target_layout': 'NHWC', '_input_shapes': {<nnvm.symbol.Symbol object at 0x7f39e1db5408>: [-1, 512], <nnvm.symbol.Symbol object at 0x7f39e1db5528>: [4]}, '_input_0d_mismatch': set()}
Attr:  {'T': tf.float32, 'Tshape': tf.int32, '_output_shapes': [[-1, 1, 1, 512]], '_node_name': '_2_2/__block/Reshape', '_target_layout': 'NHWC', '_input_shapes': {<nnvm.symbol.Symbol object at 0x7f39e1db5fd8>: [-1, 512], <nnvm.symbol.Symbol object at 0x7f39e1d406a8>: [4]}, '_input_0d_mismatch': set()}
Attr:  {'T': tf.float32, 'Tshape': tf.int32, '_output_shapes': [[-1, 1, 1, 512]], '_node_name': '_2_3/__block/Reshape', '_target_layout': 'NHWC', '_input_shapes': {<nnvm.symbol.Symbol object at 0x7f39e1d40948>: [-1, 512], <nnvm.symbol.Symbol object at 0x7f39e1d40a68>: [4]}, '_input_0d_mismatch': set()}
Attr:  {'_output_shapes': [[-1, 1, 1, 1024]], 'T': tf.float32, 'Tshape': tf.int32, '_node_name': '_3_0/__block/Reshape', '_target_layout': 'NHWC', '_input_shapes': {<nnvm.symbol.Symbol object at 0x7f39e1cbfbb8>: [-1, 1024], <nnvm.symbol.Symbol object at 0x7f39e1cbf528>: [4]}, '_input_0d_mismatch': set()}
Attr:  {'_output_shapes': [[-1, 1, 1, 1024]], 'T': tf.float32, 'Tshape': tf.int32, '_node_name': '_3_1/__block/Reshape', '_target_layout': 'NHWC', '_input_shapes': {<nnvm.symbol.Symbol object at 0x7f39e1cbfe88>: [-1, 1024], <nnvm.symbol.Symbol object at 0x7f39e1c426a8>: [4]}, '_input_0d_mismatch': set()}
Attr:  {'T': tf.float32, 'Tshape': tf.int32, '_output_shapes': [[-1, 1, 1, 1024]], '_node_name': '_3_2/__block/Reshape', '_target_layout': 'NHWC', '_input_shapes': {<nnvm.symbol.Symbol object at 0x7f39e1c42a98>: [-1, 1024], <nnvm.symbol.Symbol object at 0x7f39e1c42a68>: [4]}, '_input_0d_mismatch': set()}
Attr:  {'T': tf.float32, 'Tshape': tf.int32, '_output_shapes': [[-1, 1024]], '_node_name': '/Flatten/flatten/Reshape', '_target_layout': 'NHWC', '_input_shapes': {<nnvm.symbol.Symbol object at 0x7f39e1bc52b8>: [-1, 1024], <nnvm.symbol.Symbol object at 0x7f39e1bc5468>: [2]}, '_input_0d_mismatch': set()}

I am also facing the below error while running the same model with the Relay TensorFlow frontend.

Traceback (most recent call last):
  File "tensorflowRelay.py", line 68, in <module>
    m.set_input('DecodeJpeg/contents', tvm.nd.array(x.astype(dtype)))
  File "/home/ubuntu/.local/lib/python3.6/site-packages/tvm-0.6.dev0-py3.6-linux-x86_64.egg/tvm/contrib/graph_runtime.py", line 132, in set_input
    self._get_input(key).copyfrom(value)
  File "tvm/_ffi/_cython/./function.pxi", line 287, in core.FunctionBase.__call__
  File "tvm/_ffi/_cython/./function.pxi", line 222, in core.FuncCall
  File "tvm/_ffi/_cython/./function.pxi", line 211, in core.FuncCall3
  File "tvm/_ffi/_cython/./base.pxi", line 151, in core.CALL
tvm._ffi.base.TVMError: Traceback (most recent call last):
  [bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/tvm-0.6.dev0-py3.6-linux-x86_64.egg/tvm/libtvm.so(TVMFuncCall+0x61) [0x7fd5b4c65181]
  [bt] (2) /home/ubuntu/.local/lib/python3.6/site-packages/tvm-0.6.dev0-py3.6-linux-x86_64.egg/tvm/libtvm.so(+0x911e1b) [0x7fd5b4cb3e1b]
  [bt] (1) /home/ubuntu/.local/lib/python3.6/site-packages/tvm-0.6.dev0-py3.6-linux-x86_64.egg/tvm/libtvm.so(+0x911d0f) [0x7fd5b4cb3d0f]
  [bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/tvm-0.6.dev0-py3.6-linux-x86_64.egg/tvm/libtvm.so(+0x1cb363) [0x7fd5b456d363]
  File "/NNVM/tvm/src/runtime/graph/graph_runtime.cc", line 341
TVMError: Check failed: in_idx >= 0 (-1 vs. 0) : 

The error is raised at this line:

m.set_input('DecodeJpeg/contents', tvm.nd.array(x.astype(dtype)))

Thank you for the response

Please check whether you are passing an appropriate shape_dict argument to the build command.
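
For example, something along these lines (just a sketch, reusing the names from your own script; the input name and shape are assumptions based on the logs in this thread):

import tvm
from tvm import relay
from tvm.contrib import graph_runtime

# Sketch only: 'DecodeJpeg/contents' and (1, 64, 64, 3) are assumptions from this thread;
# graph_def, layout, target, target_host, ctx and x come from your existing script.
shape_dict = {'DecodeJpeg/contents': (1, 64, 64, 3)}

sym, params = relay.frontend.from_tensorflow(graph_def, layout=layout, shape=shape_dict)
with relay.build_config(opt_level=3):
    graph, lib, params = relay.build(sym, target=target, target_host=target_host, params=params)

# The key given to set_input should be the same placeholder name used in shape_dict.
m = graph_runtime.create(graph, lib, ctx)
m.set_input('DecodeJpeg/contents', tvm.nd.array(x.astype('uint8')))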

@srkreddy1238.

This is what I'm passing to Relay:

shape_dict = {'DecodeJpeg/contents': x.shape}
dtype_dict = {'DecodeJpeg/contents': 'uint8'}
sym, params = relay.frontend.from_tensorflow(graph_def, layout=layout, shape=shape_dict)

The input shape is (1, 64, 64, 3).

By the way, any luck with the Reshape part on the NNVM side?

@srkreddy1238.
The Relay TensorFlow frontend issue got resolved.

I'm still stuck on the NNVM Reshape part.
Any help would be great.

Thank you.

Could you do the same thing, passing dtype={'DecodeJpeg/contents':'uint8'} to nnvm.compiler.build? If we do this, we will call dtype.update internally with what the user set.
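
Something like this (just a sketch; the other arguments are the same ones from your earlier nnvm.compiler.build call):

# Sketch: the same build call as in your traceback, with an explicit
# dtype entry for the input placeholder.
dtype_dict = {'DecodeJpeg/contents': 'uint8'}
graph, lib, params = nnvm.compiler.build(sym, target, shape_dict,
                                         dtype=dtype_dict, params=params)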

Thank you @FrozenGene.
I’ll try and let you know the outcome.

In the meanwhile, do you have any idea about the Relay issue below?

I'm getting an error at every opt_level for the OpenCL target:

 Traceback (most recent call last):
  File "ensorflowRelay.py", line 81, in <module>
    m.run()
  File "/home/ubuntu/.local/lib/python3.6/site-packages/tvm-0.6.dev0-py3.6-linux-x86_64.egg/tvm/contrib/graph_runtime.py", line 151, in run
    self._run()
  File "tvm/_ffi/_cython/./function.pxi", line 287, in core.FunctionBase.__call__
  File "tvm/_ffi/_cython/./function.pxi", line 222, in core.FuncCall
  File "tvm/_ffi/_cython/./function.pxi", line 211, in core.FuncCall3
  File "tvm/_ffi/_cython/./base.pxi", line 151, in core.CALL
tvm._ffi.base.TVMError: Traceback (most recent call last):
  [bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/tvm-0.6.dev0-py3.6-linux-x86_64.egg/tvm/libtvm.so(TVMFuncCall+0x61) [0x7fd1deeb4181]
  [bt] (2) /home/ubuntu/.local/lib/python3.6/site-packages/tvm-0.6.dev0-py3.6-linux-x86_64.egg/tvm/libtvm.so(+0x942d2b) [0x7fd1def33d2b]
  [bt] (1) /home/ubuntu/.local/lib/python3.6/site-packages/tvm-0.6.dev0-py3.6-linux-x86_64.egg/tvm/libtvm.so(+0x942787) [0x7fd1def33787]
  [bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/tvm-0.6.dev0-py3.6-linux-x86_64.egg/tvm/libtvm.so(+0x1cb363) [0x7fd1de7bc363]
  File "/home/ubuntu/Desktop/sFaceCita/NNVM/tvm/src/runtime/opencl/opencl_module.cc", line 63
  File "/home/ubuntu/Desktop/sFaceCita/NNVM/tvm/src/runtime/module_util.cc", line 54
TVMError: Check failed: ret == 0 (-1 vs. 0) : Check failed: e == CL_SUCCESS: OpenCL Error, code=-5: CL_OUT_OF_RESOURCES

Hi @FrozenGene.

I tried adding

dtype = {'DecodeJpeg/contents': 'uint8'}

to nnvm.compiler.build, but I'm facing the same Reshape error as mentioned above.
Can you tell me where I'm going wrong?

Thank you

@tqchen @thierry @masahi.

I'm stuck with the TensorFlow NNVM frontend.
Any help here?

Thank you

@Vinayak618 can you pull another log with the change below?

The log will be a bit lengthy; you may attach it as a file.

diff --git a/nnvm/python/nnvm/frontend/tensorflow.py b/nnvm/python/nnvm/frontend/tensorflow.py
index f2ff6029..97880843 100644
--- a/nnvm/python/nnvm/frontend/tensorflow.py
+++ b/nnvm/python/nnvm/frontend/tensorflow.py
@@ -1309,6 +1309,7 @@ class GraphProto(object):
                 attr['_input_0d_mismatch'] = input_0d_mismatch
 
                 inputs = self._fix_extranodes(node.op, attr, inputs)
+                print("Node Name:", node.name, " Op:", node.op, " Attr:", attr)
                 op = self._convert_operator(node.op, inputs, attr, graph)
 
                 # Check if op is converted to param

Thank you for the response @srkreddy1238.
I’ll try that and let you know.

In the meanwhile, does opt_level in Relay have any significance similar to NNVM? Even after giving opt_level=10, the code compiles fine and generates the library.

Below is the code:

with relay.build_config(opt_level=10):
    graph, lib, params = relay.build(sym, target=target, target_host=target_host, params=params)

Issue 2:

It is also failing at opt_level=1 for the CUDA target.
Below is the error:

File "/home/ubuntu/.local/lib/python3.6/site-packages/topi-0.6.dev0-py3.6.egg/topi/cuda/reduction.py", line 132, in schedule_reduce
    traverse_after_reduce(outs[0].op)
  File "/home/ubuntu/.local/lib/python3.6/site-packages/topi-0.6.dev0-py3.6.egg/topi/cuda/reduction.py", line 115, in traverse_after_reduce
    traverse_after_reduce(tensor.op)
  File "/home/ubuntu/.local/lib/python3.6/site-packages/topi-0.6.dev0-py3.6.egg/topi/cuda/reduction.py", line 120, in traverse_after_reduce
    traverse_before_reduce(tensor.op)
  File "/home/ubuntu/.local/lib/python3.6/site-packages/topi-0.6.dev0-py3.6.egg/topi/cuda/reduction.py", line 103, in traverse_before_reduce
    traverse_before_reduce(tensor.op)
  File "/home/ubuntu/.local/lib/python3.6/site-packages/topi-0.6.dev0-py3.6.egg/topi/cuda/reduction.py", line 103, in traverse_before_reduce
    traverse_before_reduce(tensor.op)
  File "/home/ubuntu/.local/lib/python3.6/site-packages/topi-0.6.dev0-py3.6.egg/topi/cuda/reduction.py", line 103, in traverse_before_reduce
    traverse_before_reduce(tensor.op)
  [Previous line repeated 4 more times]
  File "/home/ubuntu/.local/lib/python3.6/site-packages/topi-0.6.dev0-py3.6.egg/topi/cuda/reduction.py", line 105, in traverse_before_reduce
    raise RuntimeError("Unsupported operator: %s" % operator.tag)
RuntimeError: Unsupported operator: 
Error during compile func

v0.0.1
%22 = fn (%p0: Tensor[(1, 8, 8, 1024), float32], %p1: Tensor[(1024,), float32], %p2: Tensor[(1024,), float32], %p3: Tensor[(1024,), float32], %p4: Tensor[(1024,), float32], %p5: Tensor[(1, 1, 1, 1024), float32], %p6: Tensor[(1, 8, 8, 1024), float32], %p7: Tensor[(1024,), float32], %p8: Tensor[(1024,), float32], %p9: Tensor[(1024,), float32], %p10: Tensor[(1024,), float32], __dict__=meta[StrMap][0]) -> Tensor[(1, 1024), float32] {
  %0 = add(%p1, 0.001f) // ty=Tensor[(1024,), float32]
  %1 = sqrt(%0) // ty=Tensor[(1024,), float32]
  %2 = divide(1f, %1) // ty=Tensor[(1024,), float32]
  %3 = multiply(%2, %p2) // ty=Tensor[(1024,), float32]
  %4 = multiply(%p0, %3) // ty=Tensor[(1, 8, 8, 1024), float32]
  %5 = negative(%p3) // ty=Tensor[(1024,), float32]
  %6 = multiply(%5, %3) // ty=Tensor[(1024,), float32]
  %7 = add(%6, %p4) // ty=Tensor[(1024,), float32]
  %8 = add(%4, %7) // ty=Tensor[(1, 8, 8, 1024), float32]
  %9 = multiply(%8, %p5) // ty=Tensor[(1, 8, 8, 1024), float32]
  %10 = add(%p7, 0.001f) // ty=Tensor[(1024,), float32]
  %11 = sqrt(%10) // ty=Tensor[(1024,), float32]
  %12 = divide(1f, %11) // ty=Tensor[(1024,), float32]
  %13 = multiply(%12, %p8) // ty=Tensor[(1024,), float32]
  %14 = multiply(%p6, %13) // ty=Tensor[(1, 8, 8, 1024), float32]
  %15 = negative(%p9) // ty=Tensor[(1024,), float32]
  %16 = multiply(%15, %13) // ty=Tensor[(1024,), float32]
  %17 = add(%16, %p10) // ty=Tensor[(1024,), float32]
  %18 = add(%14, %17) // ty=Tensor[(1, 8, 8, 1024), float32]
  %19 = add(%9, %18) // ty=Tensor[(1, 8, 8, 1024), float32]
  %20 = nn.relu(%19) // ty=Tensor[(1, 8, 8, 1024), float32]
  %21 = mean(%20, axis=[1, 2]) // ty=Tensor[(1, 1024), float32]
  %21
}
%22
// meta data omitted. you can use show_meta_data=True to include meta data--------------------------

Any help here?