TensorRT 7.2.1.6
NVIDIA TensorRT
Looking for a C++ dev who knows TensorRT?
I'm looking for work. Hire me!
nmtSample::DebugUtil::DumpTensorPlugin Class Referenceabstract
Inheritance diagram for nmtSample::DebugUtil::DumpTensorPlugin:
Collaboration diagram for nmtSample::DebugUtil::DumpTensorPlugin:

Public Types

typedef std::shared_ptr< DumpTensorPlugin > ptr
 

Public Member Functions

 DumpTensorPlugin (std::shared_ptr< std::ostream > out)
 
 ~DumpTensorPlugin () override=default
 
int getNbOutputs () const override
 Get the number of outputs from the layer. More...
 
nvinfer1::Dims getOutputDimensions (int index, const nvinfer1::Dims *inputs, int nbInputDims) override
 
void configure (const nvinfer1::Dims *inputDims, int nbInputs, const nvinfer1::Dims *outputDims, int nbOutputs, int maxBatchSize) override
 
int initialize () override
 Initialize the layer for execution. More...
 
void terminate () override
 Release resources acquired during plugin layer initialization. More...
 
size_t getWorkspaceSize (int maxBatchSize) const override
 
int enqueue (int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream) override
 
size_t getSerializationSize () override
 Find the size of the serialization buffer required. More...
 
void serialize (void *buffer) override
 Serialize the layer. More...
 
virtual Dims getOutputDimensions (int32_t index, const Dims *inputs, int32_t nbInputDims)=0
 Get the dimension of an output tensor. More...
 
virtual void configure (const Dims *inputDims, int32_t nbInputs, const Dims *outputDims, int32_t nbOutputs, int32_t maxBatchSize)=0
 Configure the layer. More...
 
virtual size_t getWorkspaceSize (int32_t maxBatchSize) const =0
 Find the workspace size required by the layer. More...
 
virtual int32_t enqueue (int32_t batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream)=0
 Execute the layer. More...
 

Private Attributes

std::shared_ptr< std::ostream > mOut
 
nvinfer1::Dims mDims
 
int mTensorVolume
 
int mElemsPerRow
 
PinnedHostBuffer< float >::ptr mData
 

Member Typedef Documentation

◆ ptr

Constructor & Destructor Documentation

◆ DumpTensorPlugin()

nmtSample::DebugUtil::DumpTensorPlugin::DumpTensorPlugin ( std::shared_ptr< std::ostream >  out)

◆ ~DumpTensorPlugin()

nmtSample::DebugUtil::DumpTensorPlugin::~DumpTensorPlugin ( )
override default

Member Function Documentation

◆ getNbOutputs()

int nmtSample::DebugUtil::DumpTensorPlugin::getNbOutputs ( ) const
override virtual

Get the number of outputs from the layer.

Returns
The number of outputs.

This function is called by the implementations of INetworkDefinition and IBuilder. In particular, it is called prior to any call to initialize().

Implements nvinfer1::IPlugin.

◆ getOutputDimensions() [1/2]

nvinfer1::Dims nmtSample::DebugUtil::DumpTensorPlugin::getOutputDimensions ( int  index,
const nvinfer1::Dims *  inputs,
int  nbInputDims 
)
override

◆ configure() [1/2]

void nmtSample::DebugUtil::DumpTensorPlugin::configure ( const nvinfer1::Dims *  inputDims,
int  nbInputs,
const nvinfer1::Dims *  outputDims,
int  nbOutputs,
int  maxBatchSize 
)
override

◆ initialize()

int nmtSample::DebugUtil::DumpTensorPlugin::initialize ( )
override virtual

Initialize the layer for execution.

This is called when the engine is created.

Returns
0 for success, else non-zero (which will cause engine termination).

Implements nvinfer1::IPlugin.

◆ terminate()

void nmtSample::DebugUtil::DumpTensorPlugin::terminate ( )
override virtual

Release resources acquired during plugin layer initialization.

This is called when the engine is destroyed.

See also
initialize()

Implements nvinfer1::IPlugin.

◆ getWorkspaceSize() [1/2]

size_t nmtSample::DebugUtil::DumpTensorPlugin::getWorkspaceSize ( int  maxBatchSize) const
override

◆ enqueue() [1/2]

int nmtSample::DebugUtil::DumpTensorPlugin::enqueue ( int  batchSize,
const void *const *  inputs,
void **  outputs,
void *  workspace,
cudaStream_t  stream 
)
override

◆ getSerializationSize()

size_t nmtSample::DebugUtil::DumpTensorPlugin::getSerializationSize ( )
override virtual

Find the size of the serialization buffer required.

Returns
The size of the serialization buffer.

Implements nvinfer1::IPlugin.

◆ serialize()

void nmtSample::DebugUtil::DumpTensorPlugin::serialize ( void *  buffer)
override virtual

Serialize the layer.

Parameters
buffer — A pointer to a buffer of size at least that returned by getSerializationSize().
See also
getSerializationSize()

Implements nvinfer1::IPlugin.

◆ getOutputDimensions() [2/2]

virtual Dims nvinfer1::IPlugin::getOutputDimensions ( int32_t  index,
const Dims *  inputs,
int32_t  nbInputDims 
)
pure virtual, inherited

Get the dimension of an output tensor.

Parameters
index — The index of the output tensor.
inputs — The input tensors.
nbInputDims — The number of input tensors.

This function is called by the implementations of INetworkDefinition and IBuilder. In particular, it is called prior to any call to initialize().

◆ configure() [2/2]

virtual void nvinfer1::IPlugin::configure ( const Dims *  inputDims,
int32_t  nbInputs,
const Dims *  outputDims,
int32_t  nbOutputs,
int32_t  maxBatchSize 
)
pure virtual, inherited

Configure the layer.

This function is called by the builder prior to initialize(). It provides an opportunity for the layer to make algorithm choices on the basis of its weights, dimensions, and maximum batch size. The type is assumed to be FP32 and format NCHW.

Parameters
inputDims — The input tensor dimensions.
nbInputs — The number of inputs.
outputDims — The output tensor dimensions.
nbOutputs — The number of outputs.
maxBatchSize — The maximum batch size.

The dimensions passed here do not include the outermost batch size (i.e. for 2-D image networks, they will be 3-dimensional CHW dimensions).

This method is not called for PluginExt classes, configureWithFormat is called instead.

Implemented in nvinfer1::IPluginExt.

◆ getWorkspaceSize() [2/2]

virtual size_t nvinfer1::IPlugin::getWorkspaceSize ( int32_t  maxBatchSize) const
pure virtual, inherited

Find the workspace size required by the layer.

This function is called during engine startup, after initialize(). The workspace size returned should be sufficient for any batch size up to the maximum.

Returns
The workspace size.

◆ enqueue() [2/2]

virtual int32_t nvinfer1::IPlugin::enqueue ( int32_t  batchSize,
const void *const *  inputs,
void **  outputs,
void *  workspace,
cudaStream_t  stream 
)
pure virtual, inherited

Execute the layer.

Parameters
batchSize — The number of inputs in the batch.
inputs — The memory for the input tensors.
outputs — The memory for the output tensors.
workspace — Workspace for execution.
stream — The stream in which to execute the kernels.
Returns
0 for success, else non-zero (which will cause engine termination).

Member Data Documentation

◆ mOut

std::shared_ptr<std::ostream> nmtSample::DebugUtil::DumpTensorPlugin::mOut
private

◆ mDims

nvinfer1::Dims nmtSample::DebugUtil::DumpTensorPlugin::mDims
private

◆ mTensorVolume

int nmtSample::DebugUtil::DumpTensorPlugin::mTensorVolume
private

◆ mElemsPerRow

int nmtSample::DebugUtil::DumpTensorPlugin::mElemsPerRow
private

◆ mData

PinnedHostBuffer<float>::ptr nmtSample::DebugUtil::DumpTensorPlugin::mData
private

The documentation for this class was generated from the following files: