From f2042bfb327fda8ba946cead7cf46766e8593030 Mon Sep 17 00:00:00 2001
From: Julien Jomier <219040+jjomier@users.noreply.github.com>
Date: Wed, 2 Oct 2024 11:02:34 -0400
Subject: [PATCH] Fix TRT deprecation warnings (#523)

Signed-off-by: Julien Jomier
---
 .../lstm_tensor_rt_inference/tensor_rt_inference.cpp | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/gxf_extensions/lstm_tensor_rt_inference/tensor_rt_inference.cpp b/gxf_extensions/lstm_tensor_rt_inference/tensor_rt_inference.cpp
index 01caed35b..8449ce610 100644
--- a/gxf_extensions/lstm_tensor_rt_inference/tensor_rt_inference.cpp
+++ b/gxf_extensions/lstm_tensor_rt_inference/tensor_rt_inference.cpp
@@ -635,8 +635,13 @@ gxf::Expected> TensorRtInference::convertModelToEngine() {
   if (enable_fp16_.get()) { builderConfig->setFlag(nvinfer1::BuilderFlag::kFP16); }
 
   // Parses ONNX with explicit batch size for support of dynamic shapes/batch
-  NvInferHandle network(builder->createNetworkV2(
-      1U << static_cast(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
+#if NV_TENSORRT_MAJOR < 10
+  const auto explicitBatch =
+      1U << static_cast(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
+#else
+  const auto explicitBatch = 1U;
+#endif
+  NvInferHandle network(builder->createNetworkV2(explicitBatch));
   NvInferHandle onnx_parser(
       nvonnxparser::createParser(*network, cuda_logger_));