A problem with deployment on Windows

inference_r.hpp:

#ifndef _TVM_INFERENCE_H
#define _TVM_INFERENCE_H

#include <stdio.h>
#include <dlpack/dlpack.h>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <tvm/runtime/module.h>
#include <tvm/runtime/registry.h>
#include <tvm/runtime/packed_func.h>

//#include "tf_io.hpp"
class Infer_Deploy {
private:
void * handle;
DLTensor* input;
DLTensor* output;
tvm::runtime::PackedFunc run;
tvm::runtime::PackedFunc get_output;
tvm::runtime::Module* module;
tvm::runtime::PackedFunc set_input;
public:
cv::Mat mask_norm;

private:
bool Infer_Work();

public:

Infer_Deploy(int32_t iModelWid, int32_t iModelHei);
~Infer_Deploy();

bool forward(const cv::Mat & f_mat);

};
#endif // _TVM_INFERENCE_H

inference_r.cpp:

#include "inference_r.hpp"
#include <time.h>
#include <string.h>   // for memset
#include <fstream>

using namespace std;

#define MODEL_WID ( 384)
#define MODEL_HEI ( 384)

Infer_Deploy::~Infer_Deploy( )
{
TVMArrayFree(input );
TVMArrayFree(output);
}

Infer_Deploy::Infer_Deploy( int32_t iModelWid, int32_t iModelHei)
{
std::string modelFolder1 = ".\\lib";
std::string param_path   = modelFolder1 + "\\net.dll";
tvm::runtime::Module mod_syslib = tvm::runtime::Module::LoadFromFile(param_path);

//load graph
std::ifstream json_in(modelFolder1 + "\\net.json");
std::string json_data((std::istreambuf_iterator<char>(json_in)), std::istreambuf_iterator<char>());
json_in.close();
int device_type = kDLCPU;
int device_id   = 0;

// get global function module for graph runtime
tvm::runtime::Module mod = (*tvm::runtime::Registry::Get("tvm.graph_runtime.create"))
                                      (json_data, mod_syslib, device_type, device_id);
this->handle = new tvm::runtime::Module(mod);

//load param
std::ifstream params_in(modelFolder1 + "\\net.params", std::ios::binary);
std::string params_data((std::istreambuf_iterator<char>(params_in)), 
                         std::istreambuf_iterator<char>());
params_in.close();
TVMByteArray params_arr;
params_arr.data = params_data.c_str();
params_arr.size = params_data.length();
tvm::runtime::PackedFunc load_params = mod.GetFunction("load_params");
load_params(params_arr);

//*********Input initialization************
constexpr int dtype_code  = kDLFloat;
constexpr int dtype_bits  = 32;
constexpr int dtype_lanes = 1;
constexpr int in_ndim     = 4;
const int64_t in_shape[in_ndim] = { 1, MODEL_HEI, MODEL_WID, 3 };

TVMArrayAlloc(in_shape ,  in_ndim, dtype_code, 
                                   dtype_bits, 
                                   dtype_lanes, device_type, device_id, &input);

module    = (tvm::runtime::Module*)handle;

set_input = module->GetFunction("set_input");

//********set output*********
int out_ndim = 2;
int64_t out_shape[2] = { MODEL_WID * MODEL_HEI, 2 };
int dtype_bits_out = 32;
int dtype_code_out = kDLFloat;

TVMArrayAlloc(out_shape, out_ndim, dtype_code_out, 
                                   dtype_bits_out, 
                                   dtype_lanes, device_type, device_id, &output);

run        = module->GetFunction("run");
get_output = module->GetFunction("get_output");

/* buffer init: the input tensor holds 1 x iModelHei x iModelWid x 3 floats */
memset(input->data, 0, sizeof(float) * iModelWid * iModelHei * 3);
}

bool Infer_Deploy::Infer_Work( )
{
set_input("preprocess/truediv", input);
run();
get_output(0, output);

return true;
}
bool Infer_Deploy::forward(const cv::Mat & f_mat)
{
// NOTE: copying / preprocessing f_mat into input->data is not shown here
Infer_Work(); /* tvm inference */

return true;
}

In the Windows deployment, the inference part was written as above. When called directly from the main of the program, e.g. TVM.forward(resFrame), the inference works correctly. But in a multi-threaded environment, Infer_Work() always hits an error when it reaches run(). The program was linked against tvm_runtime.dll, built with the Multi-threaded DLL (/MD) runtime, and not from tvm_runtime_pack.cc (which includes the runtime source files that can be selected according to the target). Is the error related to this, i.e. must I build from tvm_runtime_pack.cc for deployment instead of using tvm_runtime.dll?
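For context, the failing call pattern is roughly the sketch below; the thread setup, the TVM instance name, and the image path are simplified placeholders for my actual code:

#include <thread>
#include <opencv2/opencv.hpp>
#include "inference_r.hpp"

int main()
{
    // Placeholder setup: image path and sizes stand in for the real application code.
    Infer_Deploy TVM(384, 384);
    cv::Mat resFrame = cv::imread("test.png");

    TVM.forward(resFrame);            // called directly from main: inference works

    std::thread worker([&]() {
        TVM.forward(resFrame);        // called from a worker thread: error once run() executes
    });
    worker.join();

    return 0;
}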

Actually, you can wrap the calls in a try/catch to get the detailed error message, and then you may find the reason yourself.
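For example, a minimal sketch of Infer_Work() with such a try/catch (assuming the runtime throws dmlc::Error, whose what() carries the detailed message; the std::cerr logging is just for illustration):

#include <iostream>

bool Infer_Deploy::Infer_Work()
{
    try {
        set_input("preprocess/truediv", input);
        run();
        get_output(0, output);
    } catch (const dmlc::Error& e) {
        // TVM runtime errors derive from dmlc::Error; e.what() holds the full message
        std::cerr << "TVM error: " << e.what() << std::endl;
        return false;
    } catch (const std::exception& e) {
        // fallback for any other exception type
        std::cerr << "std::exception: " << e.what() << std::endl;
        return false;
    }
    return true;
}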