[TENSORFLOW] Different inference results between TF and TVM

Here is the component version:

  • llvm: 10.0
  • tensorflow: 1.14
  • tvm: 36a0bf94cf93c5d4b067ae4359b8807ae2dde2d2

Download the frozen model here:

wget https://zenodo.org/record/2535873/files/resnet50_v1.pb
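
The scripts below assume the graph's input and output nodes are named 'input_tensor' and 'softmax_tensor'; those names can be double-checked by listing the nodes of the frozen graph. A minimal sketch (not part of the original repro):

# Sketch: confirm the input/output node names in the frozen graph.
import tensorflow as tf

graph_def = tf.compat.v1.GraphDef()
with tf.compat.v1.gfile.GFile("./resnet50_v1.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

# Placeholder nodes are the graph inputs.
for node in graph_def.node:
    if node.op == "Placeholder":
        print("input node:", node.name)
# The final node of a frozen classifier is typically the output.
print("last node:", graph_def.node[-1].name)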

Here is the script I use to compile it with TVM:

# tvm, relay
import tvm
from tvm import te
from tvm import relay

# os and numpy
import numpy as np
import os.path

# Tensorflow imports
import tensorflow as tf
try:
    tf_compat_v1 = tf.compat.v1
except ImportError:
    tf_compat_v1 = tf

# Tensorflow utility functions
import tvm.relay.testing.tf as tf_testing

target = 'llvm'
# target_host = 'llvm'
target_host = None
layout = None

model_path = "/home/lesliefang/tvm/test_script/rn50/resnet50_v1.pb"
INPUTS = 'input_tensor'
OUTPUTS = 'softmax_tensor'

with tf_compat_v1.gfile.GFile(model_path, 'rb') as f:
    graph_def = tf_compat_v1.GraphDef()
    graph_def.ParseFromString(f.read())
    # Import the graph definition into the default graph
    # (import_graph_def returns None here, so there is nothing to assign).
    tf_compat_v1.import_graph_def(graph_def, name='')

    # Call the TVM utility to check and convert the graph definition.
    graph_def = tf_testing.ProcessGraphDefParam(graph_def)

    # Add shapes to the graph.
    with tf_compat_v1.Session() as sess:
        graph_def = tf_testing.AddShapesToGraphDef(sess, OUTPUTS)

shape_dict = {INPUTS: (1, 224, 224, 3)}

mod, params = relay.frontend.from_tensorflow(graph_def,
                                             layout=layout,
                                             shape=shape_dict)

print("Tensorflow protobuf imported to relay frontend.")

with tvm.transform.PassContext(opt_level=3):
    graph, lib, params = relay.build(mod,
                                     target=target,
                                     target_host=target_host,
                                     params=params)

# save the graph, lib and params into separate files
# (create the output directory first so the writes below succeed)
os.makedirs("./export", exist_ok=True)

path_lib = "./export/deploy_lib.tar"
lib.export_library(path_lib)
with open("./export/deploy_graph.json", "w") as fo:
    fo.write(graph)
with open("./export/deploy_param.params", "wb") as fo:
    fo.write(relay.save_param_dict(params))
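
As a quick sanity check (not in the original script), the serialized parameter blob can be reloaded and compared against the in-memory params:

# Sketch: verify the saved params round-trip through serialization.
reloaded = relay.load_param_dict(
    bytearray(open("./export/deploy_param.params", "rb").read()))
assert set(reloaded.keys()) == set(params.keys())
print("param round-trip OK, %d tensors" % len(reloaded))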

Here is the script I use to run inference in both TVM and TF:

import numpy as np

from tvm import relay
from tvm.relay import testing
import tvm
from tvm import te
from tvm.contrib import graph_runtime

# load the module back.
path_lib = "./export/deploy_lib.tar"
loaded_json = open("./export/deploy_graph.json").read()
loaded_lib = tvm.runtime.load_module(path_lib)
loaded_params = bytearray(open("./export/deploy_param.params", "rb").read())

## Dummy data
batch_size = 1
image_shape = (224, 224, 3)
data_shape = (batch_size,) + image_shape
input_data = np.random.uniform(size=data_shape).astype("float32") * 255 - 128.0

ctx = tvm.cpu()
module = graph_runtime.create(loaded_json, loaded_lib, ctx)
module.load_params(loaded_params)
# Bind the input under its real graph input name; a keyword that does not
# match any graph input (e.g. run(data=...)) can be silently ignored.
module.set_input('input_tensor', tvm.nd.array(input_data))
module.run()

# get outputs
tvm_output = module.get_output(0, tvm.nd.empty((1, 1001), 'float32'))

predictions = tvm_output.asnumpy()
predictions = np.squeeze(predictions)

# Creates node ID --> English string lookup.
# Tensorflow utility functions
import tvm.relay.testing.tf as tf_testing
map_proto_path = "./imagenet_2012_challenge_label_map_proto.pbtxt"
label_path = "./imagenet_synset_to_human_label_map.txt"

node_lookup = tf_testing.NodeLookup(label_lookup_path=map_proto_path,
                                    uid_lookup_path=label_path)

# TF inference
import tensorflow.compat.v1 as tf

with tf.Session() as sess:

    from tensorflow.python.platform import gfile

    INPUTS = 'input_tensor'
    OUTPUTS = 'softmax_tensor'

    with gfile.GFile('./resnet50_v1.pb', 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        # Import the frozen graph into the session's default graph.
        tf.import_graph_def(graph_def, name='')

    Ys = sess.graph.get_tensor_by_name(OUTPUTS + ':0')
    X = sess.graph.get_tensor_by_name(INPUTS + ':0')

    res = sess.run(Ys, feed_dict={X: input_data})
    res = np.squeeze(res)

print("-------------tvm---------------")
# Print top 5 predictions from TVM output.
top_k = predictions.argsort()[-5:][::-1]
for node_id in top_k:
    human_string = node_lookup.id_to_string(node_id)
    score = predictions[node_id]
    print('%s (score = %.5f)' % (human_string, score))
    
print("--------------tf---------------")
# Print top 5 predictions from TVM output.
top_k = res.argsort()[-5:][::-1]
for node_id in top_k:
    human_string = node_lookup.id_to_string(node_id)
    score = predictions[node_id]
    print('%s (score = %.5f)' % (human_string, score))

And the results differ between TF and TVM:

-------------tvm---------------
sunglasses, dark glasses, shades (score = 0.03218)
Doberman, Doberman pinscher (score = 0.03152)
lampshade, lamp shade (score = 0.02829)
rock beauty, Holocanthus tricolor (score = 0.02535)
nail (score = 0.02501)
--------------tf---------------
barrel, cask (score = 0.00151)
mixing bowl (score = 0.00200)
spider web, spider's web (score = 0.00048)
sea snake (score = 0.00001)
Tibetan mastiff (score = 0.00005)
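
For debugging, comparing the raw output vectors is more informative than comparing top-5 labels; a minimal sketch, assuming `predictions` and `res` from the inference script above are still in scope:

# Sketch: quantify the TVM-vs-TF mismatch element-wise.
import numpy as np

print("max abs diff:", np.max(np.abs(predictions - res)))
print("top-1: tvm=%d tf=%d" % (int(predictions.argmax()), int(res.argmax())))
# Raises with a detailed report when outputs diverge beyond float32 noise.
np.testing.assert_allclose(predictions, res, rtol=1e-3, atol=1e-5)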