[Solved][Python 2 -> 3] relay.build gives error: 'NoneType' object is not subscriptable

I have a script that ran fine with Python 2, but as soon as I rebuilt TVM with the default Python 3, I ran into the following error:

File "/tvm/python/tvm/autotvm/tophub.py", line 207, in load_reference_log
    if (model == inp.target.model and inp.task.workload[0] == workload_name and
TypeError: 'NoneType' object is not subscriptable

Script:

import os
import numpy as np
import tensorflow as tf
import tvm
import tvm.relay.testing.tf as tf_testing
from tvm import relay
import ast
import argparse
from tvm import autotvm
from tvm.relay import testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
import tvm.contrib.graph_runtime as runtime
import time
from tensorflow.python.framework.graph_util import convert_variables_to_constants
from tensorflow.contrib import slim

def generate_graph(data_format=None,shape=None):
    g = tf.Graph()
    with g.as_default():
        data = tf.placeholder(dtype=tf.uint8, shape=shape, name='input')

        #with tf.variable_scope('preprocess', reuse=tf.AUTO_REUSE):

        data = tf.cast(data, tf.float32) * tf.constant(1. / 255.)
        depthwise_filter = tf.get_variable(shape=(3, 3, 3, 1), name="depthwise_filter")
        pointwise_filter = tf.get_variable(shape=[1, 1, 3, 3], name="pointwise_filter")
        layer1 = data
        layer1 = tf.nn.separable_conv2d(layer1,depthwise_filter,pointwise_filter,[1,1,1,1],padding="SAME")
        #layer1 = slim.separable_conv2d(layer1, 32, [3,3], depth_multiplier=1, stride=2, rate=1, padding='same', data_format=data_format)
        out_c1 = tf.identity(layer1, 'outc1')
        with tf.Session(graph = g) as sess:
            sess.run(tf.global_variables_initializer())
            frozen_graph = convert_variables_to_constants(sess, sess.graph_def, ['outc1'])
        output_graph='trunk_fpn_1200.graph'
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(frozen_graph.SerializeToString())
        #tf.train.write_graph(frozen_graph,'.','trunk_512.graph')

    return frozen_graph, ['outc1']

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Run inference on a frozen model with random input values")
    parser.add_argument("--input_shape", type=ast.literal_eval, default = [1,1200,1920,3], help = "input shape")
    parser.add_argument("--ctx", type = str, default = "gpu", help = "context for target")
    parser.add_argument("--data_format", type = str, default = 'NHWC', help = "NHCW or NHWC")
    args = parser.parse_args()

    #Some useful parameters 
    dtype = 'uint8'
    input_shape = args.input_shape
    data_format = args.data_format
    if args.ctx == "cpu":
        target = 'llvm -mcpu=x86_AVX2'
        ctx = tvm.cpu(0)
        layout = None
    else:
        target = tvm.target.cuda()
        ctx = tvm.gpu(0)
        layout='NCHW'
        target_host = 'llvm'
    
    
    ## Creating dummy input numpy array 
    # np.random.random_integers is deprecated; randint's upper bound is exclusive
    input_ = np.random.randint(0, 256, size=input_shape).astype(np.uint8)

    graph_def, output_name = generate_graph(data_format=data_format,shape=input_shape)

    net, params = tvm.relay.frontend.from_tensorflow(graph_def, shape={'input':input_shape}, layout=layout)
    print(type(net),type(net.body),type(params))
    print(net.body)
    print('===========================================================')
    with relay.build_config(opt_level=3):
        graph, lib, params = relay.build(net, target=target, target_host = target_host, params = params)
    
    m = runtime.create(graph,lib,ctx)
    m.set_input('input',tvm.nd.array(input_.astype(dtype)))
    m.set_input(**params)

    ## Execute
    m.run()
    tvm_output = m.get_output(0)#, tvm.nd.empty(((1, 1008)), 'float32'))
    answer = tvm_output.asnumpy()
    #answer = np.squeeze(answer)
    print(np.shape(answer))

The entire error stack:

WARNING:autotvm:Cannot find config for target=cuda -model=unknown, workload=('conv2d', (1, 3, 1200, 1920, 'float32'), (3, 3, 1, 1, 'float32'), (1, 1), (0, 0), (1, 1), 'NCHW', 'float32'). A fallback configuration is used, which may bring great performance regression.
Traceback (most recent call last):
  File "recreating_ip_mismatch_prob.py", line 84, in <module>
    graph, lib, params = relay.build(net, target=target, target_host = target_host, params = params)
  File "/tvm/python/tvm/relay/build_module.py", line 303, in build
    graph_json, lowered_funcs, params = graph_gen.codegen(func)
  File "/tvm/python/tvm/relay/backend/graph_runtime_codegen.py", line 90, in codegen
    self._codegen(func)
  File "/tvm/python/tvm/_ffi/_ctypes/function.py", line 206, in __call__
    raise get_last_ffi_error()
TypeError: Traceback (most recent call last):
  [bt] (8) /tvm/build/libtvm.so(+0xe0fec9) [0x7f809cadcec9]
  [bt] (7) /tvm/build/libtvm.so(+0xe05c66) [0x7f809cad2c66]
  [bt] (6) /tvm/build/libtvm.so(+0xe0b790) [0x7f809cad8790]
  [bt] (5) /tvm/build/libtvm.so(+0xe05c66) [0x7f809cad2c66]
  [bt] (4) /tvm/build/libtvm.so(+0xe0b3b3) [0x7f809cad83b3]
  [bt] (3) /tvm/build/libtvm.so(+0xe1adff) [0x7f809cae7dff]
  [bt] (2) /tvm/build/libtvm.so(+0xe21c6c) [0x7f809caeec6c]
  [bt] (1) /tvm/build/libtvm.so(+0xe21164) [0x7f809caee164]
  [bt] (0) /tvm/build/libtvm.so(+0xff056b) [0x7f809ccbd56b]
  File "/tvm/python/tvm/_ffi/_ctypes/function.py", line 71, in cfun
    rv = local_pyfunc(*pyargs)
  File "/tvm/python/tvm/relay/op/nn/_nn.py", line 130, in schedule_conv2d
    return topi.generic.schedule_conv2d_nchw(outs)
  File "<decorator-gen-45>", line 2, in schedule_conv2d_nchw
  File "/tvm/python/tvm/target.py", line 372, in dispatch_func
    return dispatch_dict[k](*args, **kwargs)
  File "<decorator-gen-98>", line 2, in config_dispatcher
  File "/tvm/python/tvm/autotvm/task/dispatcher.py", line 215, in dispatch_func
    return dispatch_dict['direct'](cfg, *args, **kwargs)
  File "/tvm/python/tvm/autotvm/task/topi_integration.py", line 428, in template_call
    return f(cfg, outs, *args, **kwargs)
  File "/tvm/topi/python/topi/cuda/conv2d.py", line 151, in schedule_conv2d_nchw_cuda
    traverse_inline(s, outs[0].op, _callback)
  File "/tvm/topi/python/topi/util.py", line 51, in traverse_inline
    _traverse(final_op)
  File "/tvm/topi/python/topi/util.py", line 49, in _traverse
    callback(op)
  File "/tvm/topi/python/topi/cuda/conv2d.py", line 145, in _callback
    schedule_direct_cuda(cfg, s, op.output(0))
  File "/tvm/topi/python/topi/cuda/conv2d_direct.py", line 46, in schedule_direct_cuda
    target.target_name, target.model, 'conv2d', 'direct')
  File "/tvm/python/tvm/autotvm/tophub.py", line 207, in load_reference_log
    if (model == inp.target.model and inp.task.workload[0] == workload_name and
TypeError: 'NoneType' object is not subscriptable

It would be great if you could look a bit deeper into this; the error seems to be due to an incorrectly formatted log entry that leaves None in task.workload.
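
For reference, the crash could be avoided with a defensive check before the comparison in load_reference_log. A minimal sketch, not the actual TVM patch (matching_records and its arguments are hypothetical names):

def matching_records(records, model, workload_name):
    # Yield (inp, res) pairs matching the target model and workload name,
    # skipping records whose task decoded without a workload instead of
    # indexing into None.
    for inp, res in records:
        workload = inp.task.workload
        if workload is None:
            continue  # malformed tophub entry; ignore it rather than crash
        if inp.target.model == model and workload[0] == workload_name:
            yield inp, res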

I’ve hit the same error on the tutorial code just by changing the batch size to 2 (my TVM is today’s GitHub master, running on Python 3.6 / CentOS 7.5).

cuda_0.04.log seems broken at line 483 (the line containing "cuda -model=tx2" and "topi_nn_dense"). It works for me after deleting (or commenting out) that line from ~/.tvm/tophub/cuda_0.04.log.
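
If you would rather find the offending entries programmatically than hunt for line 483 by hand, a sketch like this should do it (assuming autotvm.record.decode in this TVM version returns a record whose task.workload is None for such lines):

import os
from tvm import autotvm

# Scan the tophub log and report lines whose decoded record has no
# workload; these are the entries that crash load_reference_log.
log_path = os.path.expanduser('~/.tvm/tophub/cuda_0.04.log')
with open(log_path) as f:
    for lineno, line in enumerate(f, 1):
        if not line.strip():
            continue  # skip blank lines
        inp, _res = autotvm.record.decode(line)
        if inp.task.workload is None:
            print('line %d decodes with workload=None' % lineno)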

OK. In my case, I am trying to run standard HD models like MobileNets and ResNets, and the problem was that I was not specifying a GPU model as an argument when building tvm.target.cuda(). I chose 1080ti, which is closest to the GPU I am using. It's working now, although not optimizing properly, but that's okay for now.
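
Concretely, the change is one line in the script above (1080ti here is just the closest listed model to my card; substitute yours):

import tvm

# Naming the model lets tophub look up a matching reference log
# instead of falling back to 'unknown'.
target = tvm.target.cuda(model='1080ti')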

A subscriptable object is any object that implements the __getitem__ special method (think lists and dictionaries). You are trying to subscript an object which you think is a list or dict, but is actually None. NoneType is the type of the None object, which represents a lack of value; for example, a function that does not explicitly return a value returns None. 'NoneType' object is not subscriptable is the error Python throws when you use the square-bracket notation object[key] on an object that doesn't define __getitem__.
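
You can reproduce the message in isolation:

# Indexing into None raises the exact TypeError from the traceback above.
workload = None
try:
    workload[0]
except TypeError as err:
    print(err)  # 'NoneType' object is not subscriptable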