import os
import tensorrt as trt

TRT_LOGGER = trt.Logger()
model_path = 'FashionMNIST.onnx'
engine_file_path = "FashionMNIST.trt"
# The ONNX parser requires an explicit-batch network; here the batch size is 1.
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) \
        as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
    builder.max_workspace_size = 1 << 28
    builder.max_batch_size = 1
    if not os.path.exists(model_path):
        print('ONNX file {} not found.'.format(model_path))
        exit(0)
    print('Loading ONNX file from path {}...'.format(model_path))
    with open(model_path, 'rb') as model:
        print('Beginning ONNX file parsing')
        if not parser.parse(model.read()):
            print('ERROR: Failed to parse the ONNX file.')
            for error in range(parser.num_errors):
                print(parser.get_error(error))
            exit(1)  # stop here: building from an unparsed network would fail
    network.get_input(0).shape = [1, 1, 28, 28]
    print('Completed parsing of ONNX file')
    engine = builder.build_cuda_engine(network)
    with open(engine_file_path, "wb") as f:
        f.write(engine.serialize())
Running the script parses the ONNX file and writes the serialized engine to FashionMNIST.trt.
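Note that builder.max_workspace_size and build_cuda_engine are the TensorRT 7 style API; in TensorRT 8 they were removed in favor of a builder config. As a rough sketch, assuming TensorRT 8.x (not the version used in this post), the build step would instead look like:

    # TensorRT 8.x equivalent of the build step above (sketch, untested here)
    config = builder.create_builder_config()
    config.max_workspace_size = 1 << 28
    serialized_engine = builder.build_serialized_network(network, config)
    with open(engine_file_path, "wb") as f:
        f.write(serialized_engine)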
Next, we use this .trt engine file for inference.
import pycuda.driver as cuda
import pycuda.autoinit
import cv2
import numpy as np
import os
import tensorrt as trt
TRT_LOGGER = trt.Logger()
model_path='FashionMNIST.onnx'
engine_file_path = "FashionMNIST.trt"
class HostDeviceMem(object):
    def __init__(self, host_mem, device_mem):
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)

    def __repr__(self):
        return self.__str__()
def allocate_buffers(engine):
    inputs = []
    outputs = []
    bindings = []
    stream = cuda.Stream()
    for binding in engine:
        size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Allocate host and device buffers
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to device bindings.
        bindings.append(int(device_mem))
        # Append to the appropriate list.
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
    return inputs, outputs, bindings, stream
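If you are unsure which bindings an engine exposes (for example, the exact input name and shape), the same binding API used above can list them. This helper is added here for illustration and is not part of the original post:

    def print_bindings(engine):
        # Print each binding's name, shape, dtype and direction
        for i in range(engine.num_bindings):
            print(engine.get_binding_name(i),
                  engine.get_binding_shape(i),
                  trt.nptype(engine.get_binding_dtype(i)),
                  'input' if engine.binding_is_input(i) else 'output')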
def do_inference_v2(context, bindings, inputs, outputs, stream):
    # Transfer input data to the GPU.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Run inference.
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back from the GPU.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]
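execute_async_v2 targets explicit-batch engines and overlaps the copies with compute on a CUDA stream. When that overlap is not needed, a simpler blocking variant works as well; this sketch (added for illustration, not from the original post) uses pycuda's synchronous copies and execute_v2:

    def do_inference_sync(context, bindings, inputs, outputs):
        # Blocking host-to-device copies, inference, then device-to-host copies
        for inp in inputs:
            cuda.memcpy_htod(inp.device, inp.host)
        context.execute_v2(bindings)
        for out in outputs:
            cuda.memcpy_dtoh(out.host, out.device)
        return [out.host for out in outputs]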
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, \
        runtime.deserialize_cuda_engine(f.read()) as engine, engine.create_execution_context() as context:
    inputs, outputs, bindings, stream = allocate_buffers(engine)
    # Load the test image as grayscale and resize it to the 28x28 input the network expects.
    image = cv2.imread('123.jpg', cv2.IMREAD_GRAYSCALE)
    image = cv2.resize(image, (28, 28))
    print(image.shape)
    # Add batch and channel axes: (28, 28) -> (1, 1, 28, 28). Depending on how the
    # model was trained/exported, you may also need to scale pixels to [0, 1] first.
    image = image[np.newaxis, np.newaxis, :, :].astype(np.float32)
    inputs[0].host = image
    # Start inference
    trt_outputs = do_inference_v2(context, bindings=bindings,
                                  inputs=inputs, outputs=outputs, stream=stream)
    print(trt_outputs)
Output:
The result matches the output from the article referenced earlier, which concludes the Python-version walkthrough.
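The printed trt_outputs is a flat vector of ten class scores. A minimal post-processing sketch for turning it into a readable prediction (the class names follow the standard FashionMNIST label order; the decode_output helper is added for illustration):

    import numpy as np

    FASHION_CLASSES = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                       'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

    def decode_output(trt_outputs):
        # trt_outputs[0] holds the 10 class scores for the single input image
        scores = np.asarray(trt_outputs[0]).reshape(-1)
        idx = int(np.argmax(scores))
        return FASHION_CLASSES[idx], scores[idx]

    label, score = decode_output(trt_outputs)
    print('Predicted class: {} (score {:.4f})'.format(label, score))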