You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I seem to have hit a bug when using TensorRT 10.0.1.6. In the last step, when converting to a TensorRT model, I could not find the quantizer node. Could this be because I used a custom nonlinear activation function (Mish)? Here are my code and model.
import os
import tensorrt as trt
from calibration import Calibrator
import pycuda.driver as cuda
import pycuda.autoinit
# Verbose logger so TensorRT prints full build/parse diagnostics.
TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
# Path to the pre-quantized ("_SQ") ONNX model to convert.
# NOTE(review): hard-coded absolute Windows path — adjust per machine.
onnx_model_path = r"D:\Python_DL\venv\Experimental_result\mobilenetv2\mobilenetv2_SQ.onnx"
def get_engine(onnx_file_path="", engine_file_path="", calibrator=None, save_engine=False):
    """Parse an ONNX model and build an INT8 TensorRT engine.

    Args:
        onnx_file_path: Path to the ONNX model file.
        engine_file_path: Destination path for the serialized engine
            (only used when ``save_engine`` is True).
        calibrator: Optional ``IInt8Calibrator``. May be None when the ONNX
            model already carries Q/DQ (quantize/dequantize) nodes — TODO
            confirm the "_SQ" model actually contains them; a missing
            calibrator on a non-Q/DQ model is a likely cause of the
            "quantizer node not found" symptom.
        save_engine: When True, write the serialized plan to engine_file_path.

    Returns:
        The deserialized ``ICudaEngine``, or None when building fails.

    Raises:
        FileNotFoundError: If ``onnx_file_path`` does not exist.
    """
    with trt.Builder(TRT_LOGGER) as builder, \
            builder.create_builder_config() as config, \
            builder.create_network(1) as network, \
            trt.Runtime(TRT_LOGGER) as runtime, \
            trt.OnnxParser(network, TRT_LOGGER) as parser:
        if not os.path.exists(onnx_file_path):
            # Raise instead of quit(): quit() is a site-module convenience
            # unavailable under `python -S` and wrong for library code.
            raise FileNotFoundError('ONNX file {} not found'.format(onnx_file_path))
        print('Loading ONNX file from path {}...'.format(onnx_file_path))
        with open(onnx_file_path, 'rb') as model:
            print('Beginning ONNX file parsing')
            # BUG FIX: the original ignored parse()'s boolean result, so a
            # failed parse was only visible later as a bare assert with no
            # detail. Surface the parser's own error records instead.
            if not parser.parse(model.read()):
                print('ERROR: failed to parse the ONNX file:')
                for i in range(parser.num_errors):
                    print(parser.get_error(i))
                return None
        # Explicit check (not assert: asserts vanish under `python -O`).
        if network.num_layers == 0:
            print('Failed to parse ONNX model. Please check if the ONNX model is compatible')
            return None
        print('Completed parsing of ONNX file')
        print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
        config.set_flag(trt.BuilderFlag.INT8)
        config.int8_calibrator = calibrator
        print('Int8 mode enabled')
        plan = builder.build_serialized_network(network, config)
        if plan is None:
            print('Failed to create the engine')
            return None
        print("Completed creating the engine")
        engine = runtime.deserialize_cuda_engine(plan)
        if save_engine:
            # Write the already-serialized plan (an IHostMemory buffer)
            # directly; engine.serialize() would redundantly re-serialize
            # the engine we just deserialized from this same plan.
            with open(engine_file_path, "wb") as f:
                f.write(plan)
        return engine
def run_int8_quantization():
    """Convert the module-level ONNX model into an INT8 TensorRT engine file.

    Raises:
        RuntimeError: If engine generation fails.
    """
    print('*** onnx to tensorrt int8 engine ***')
    engine_model_path = "mobilenet_SQ_int8.engine"
    # No calibrator is passed — presumably the "_SQ" model embeds Q/DQ
    # scales already; verify, since INT8 without calibrator nor Q/DQ fails.
    runtime_engine = get_engine(onnx_model_path, engine_model_path, save_engine=True)
    # Explicit raise instead of `assert`: asserts are stripped under -O,
    # which would silently skip this validation.
    if not runtime_engine:
        raise RuntimeError('failed engine generation...')
    print('*** success to generate INT8 engine file ***\n')
# Script entry point: build the INT8 engine when run directly.
if __name__ == '__main__':
    run_int8_quantization()
I seem to have hit a bug when using TensorRT 10.0.1.6. In the last step, when converting to a TensorRT model, I could not find the quantizer node. Could this be because I used a custom nonlinear activation function (Mish)? Here are my code and model.
This is the ONNX model:
mobilenetv2_SQ.zip
The text was updated successfully, but these errors were encountered: