Hi,
I am trying to auto-tune a relay layer (conv2d). So far I have been able to obtain the best schedule (stored in a log_file), but I have been unable to use the function “autotvm.apply_history_best” to apply the best schedule to the layer. I was hoping someone could help me figure out what I am doing wrong.
My workflow is the following:
- Tune layer using “topi_x86_conv2d_NCHWc”
- Store log file as “conv2d.log”
- Create a module out of “relay.nn.conv2d”
- Try to apply the schedule “autotvm.apply_history_best(‘conv2d.log’)”
I receive the error:
TypeError: ‘NoneType’ object is not iterable
when applying the best history.
I am tagging @comaniac here since this issue is related to:
#-------------------Code starts here --------------------#
import os
import sys
import numpy as np
import tvm
import logging
from tvm import autotvm
from tvm import relay
from tvm.relay import testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.autotvm.graph_tuner import DPTuner, PBQPTuner
import tvm.contrib.graph_runtime as runtime
#Details about the target (CPU/GPU)
target = "llvm -mcpu=core-avx2"
batch_size = 1
dtype = "float32"
log_file = "conv2d_x86.log"
#graph_opt_sch_file = "conv2d_x86_graph_opt.log"
#Set the input name of the graph
input_name = "data"
#Set number of threads used for tuning based on the number of
#physical CPU cores on your machine.
num_threads = 16
os.environ["TVM_NUM_THREADS"] = str(num_threads)
#Create function to optiimize
func_create = 'topi_x86_conv2d_NCHWc'
#Arguments to create task
args= (('TENSOR', (1, 3, 224, 224), 'float32'), ('TENSOR', (64, 3, 7, 7), 'float32'), (2, 2), (3, 3, 3, 3), (1, 1), 'NCHW', 'float32')
#Workload for the task
workload = ('conv2d', (1, 3, 224, 224, 'float32'), (64, 3, 7, 7, 'float32'), (2, 2), (3, 3, 3, 3), (1, 1), 'NCHW', 'float32')
task = autotvm.task.create(func_create, args=args,target=target, template_key='direct')
task.workload = workload
print(task)
#Define type of auto-tuner
tuner_obj = XGBTuner(task, loss_type='rank')
#logging config (for printing tuning log to the screen)
logging.getLogger('autotvm').setLevel(logging.DEBUG)
logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
#We measure 10 times and take average to reduce variance.
measure_option = autotvm.measure_option(
builder='local',
runner=autotvm.LocalRunner(number=10, repeat=1,min_repeat_ms=1000))
#You can use alternatives like XGBTuner.
n_trial = 10
print(n_trial)
tuner_obj.tune(n_trial=n_trial,
measure_option=measure_option,
callbacks=[autotvm.callback.log_to_file('conv2d.log')])
dtype = 'float32'
data = relay.var("data", shape=(1, 3, 224, 224), dtype=dtype)
kernel = relay.var("kernel", shape=(64, 3, 7, 7), dtype=dtype)
out = relay.nn.conv2d(data, kernel, strides=(1,1), padding=(3,3,3,3), dilation=(1,1),data_layout='NCHW', out_dtype=dtype)
mod = relay.Module.from_expr(out)
print(mod)
#compile kernels with history best records
with autotvm.apply_history_best('conv2d.log'):
print("Compile...")
with relay.build_config(opt_level=3):
graph, lib, params = relay.build_module.build(mod, target="llvm", params=None)