TensorRT  7.2.1.6
NVIDIA TensorRT
Looking for a C++ dev who knows TensorRT?
I'm looking for work. Hire me!
FCPlugin Class Reference (abstract)
Inheritance diagram for FCPlugin:
Collaboration diagram for FCPlugin:

Public Member Functions

 FCPlugin (const nvinfer1::Weights *weights, int nbWeights, int nbOutputChannels)
 
 FCPlugin (const void *data, size_t length)
 
 ~FCPlugin ()
 
int getNbOutputs () const override
 Get the number of outputs from the layer. More...
 
nvinfer1::Dims getOutputDimensions (int index, const nvinfer1::Dims *inputs, int nbInputDims) override
 
bool supportsFormat (nvinfer1::DataType type, nvinfer1::PluginFormat format) const override
 Check format support. More...
 
void configureWithFormat (const nvinfer1::Dims *inputDims, int nbInputs, const nvinfer1::Dims *outputDims, int nbOutputs, nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize) override
 
int initialize () override
 Initialize the layer for execution. More...
 
virtual void terminate () override
 Release resources acquired during plugin layer initialization. More...
 
virtual size_t getWorkspaceSize (int maxBatchSize) const override
 
virtual int enqueue (int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream) override
 
virtual size_t getSerializationSize () override
 Find the size of the serialization buffer required. More...
 
virtual void serialize (void *buffer) override
 Serialize the layer. More...
 
virtual int32_t getTensorRTVersion () const
 Return the API version with which this plugin was built. More...
 
virtual void configureWithFormat (const Dims *inputDims, int32_t nbInputs, const Dims *outputDims, int32_t nbOutputs, DataType type, PluginFormat format, int32_t maxBatchSize)=0
 Configure the layer. More...
 
virtual Dims getOutputDimensions (int32_t index, const Dims *inputs, int32_t nbInputDims)=0
 Get the dimension of an output tensor. More...
 
virtual size_t getWorkspaceSize (int32_t maxBatchSize) const =0
 Find the workspace size required by the layer. More...
 
virtual int32_t enqueue (int32_t batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream)=0
 Execute the layer. More...
 

Protected Member Functions

void configure (const Dims *, int32_t, const Dims *, int32_t, int32_t)
 Derived classes should not implement this. More...
 

Private Member Functions

size_t type2size (nvinfer1::DataType type)
 
template<typename T >
void write (char *&buffer, const T &val)
 
template<typename T >
void read (const char *&buffer, T &val)
 
void * copyToDevice (const void *data, size_t count)
 
void convertAndCopyToDevice (void *&deviceWeights, const nvinfer1::Weights &weights)
 
void convertAndCopyToBuffer (char *&buffer, const nvinfer1::Weights &weights)
 
void deserializeToDevice (const char *&hostBuffer, void *&deviceWeights, size_t size)
 

Private Attributes

int mNbOutputChannels
 
int mNbInputChannels
 
nvinfer1::Weights mKernelWeights
 
nvinfer1::Weights mBiasWeights
 
nvinfer1::DataType mDataType {nvinfer1::DataType::kFLOAT}
 
void * mDeviceKernel {nullptr}
 
void * mDeviceBias {nullptr}
 
cudnnHandle_t mCudnn
 
cublasHandle_t mCublas
 
cudnnTensorDescriptor_t mSrcDescriptor
 
cudnnTensorDescriptor_t mDstDescriptor
 

Constructor & Destructor Documentation

◆ FCPlugin() [1/2]

FCPlugin::FCPlugin ( const nvinfer1::Weights *  weights,
int  nbWeights,
int  nbOutputChannels 
)
inline
Here is the call graph for this function:

◆ FCPlugin() [2/2]

FCPlugin::FCPlugin ( const void *  data,
size_t  length 
)
inline
Here is the call graph for this function:

◆ ~FCPlugin()

FCPlugin::~FCPlugin ( )
inline

Member Function Documentation

◆ getNbOutputs()

int FCPlugin::getNbOutputs ( ) const
inlineoverridevirtual

Get the number of outputs from the layer.

Returns
The number of outputs.

This function is called by the implementations of INetworkDefinition and IBuilder. In particular, it is called prior to any call to initialize().

Implements nvinfer1::IPlugin.

◆ getOutputDimensions() [1/2]

nvinfer1::Dims FCPlugin::getOutputDimensions ( int  index,
const nvinfer1::Dims *  inputs,
int  nbInputDims 
)
inlineoverride

◆ supportsFormat()

bool FCPlugin::supportsFormat ( nvinfer1::DataType  type,
nvinfer1::PluginFormat  format 
) const
inlineoverridevirtual

Check format support.

Parameters
type — DataType requested.
format — PluginFormat requested.
Returns
true if the plugin supports the type-format combination.

This function is called by the implementations of INetworkDefinition, IBuilder, and ICudaEngine. In particular, it is called when creating an engine and when deserializing an engine.

Warning
DataType::kBOOL not supported.

Implements nvinfer1::IPluginExt.

◆ configureWithFormat() [1/2]

void FCPlugin::configureWithFormat ( const nvinfer1::Dims *  inputDims,
int  nbInputs,
const nvinfer1::Dims *  outputDims,
int  nbOutputs,
nvinfer1::DataType  type,
nvinfer1::PluginFormat  format,
int  maxBatchSize 
)
inlineoverride

◆ initialize()

int FCPlugin::initialize ( )
inlineoverridevirtual

Initialize the layer for execution.

This is called when the engine is created.

Returns
0 for success, else non-zero (which will cause engine termination).

Implements nvinfer1::IPlugin.

Here is the call graph for this function:

◆ terminate()

virtual void FCPlugin::terminate ( )
inlineoverridevirtual

Release resources acquired during plugin layer initialization.

This is called when the engine is destroyed.

See also
initialize()

Implements nvinfer1::IPlugin.

◆ getWorkspaceSize() [1/2]

virtual size_t FCPlugin::getWorkspaceSize ( int  maxBatchSize) const
inlineoverridevirtual

◆ enqueue() [1/2]

virtual int FCPlugin::enqueue ( int  batchSize,
const void *const *  inputs,
void **  outputs,
void *  workspace,
cudaStream_t  stream 
)
inlineoverridevirtual
Here is the call graph for this function:

◆ getSerializationSize()

virtual size_t FCPlugin::getSerializationSize ( )
inlineoverridevirtual

Find the size of the serialization buffer required.

Returns
The size of the serialization buffer.

Implements nvinfer1::IPlugin.

Here is the call graph for this function:
Here is the caller graph for this function:

◆ serialize()

virtual void FCPlugin::serialize ( void *  buffer)
inlineoverridevirtual

Serialize the layer.

Parameters
buffer — A pointer to a buffer of size at least that returned by getSerializationSize().
See also
getSerializationSize()

Implements nvinfer1::IPlugin.

Here is the call graph for this function:

◆ type2size()

size_t FCPlugin::type2size ( nvinfer1::DataType  type)
inlineprivate
Here is the caller graph for this function:

◆ write()

template<typename T >
void FCPlugin::write ( char *&  buffer,
const T &  val 
)
inlineprivate
Here is the caller graph for this function:

◆ read()

template<typename T >
void FCPlugin::read ( const char *&  buffer,
T &  val 
)
inlineprivate
Here is the caller graph for this function:

◆ copyToDevice()

void* FCPlugin::copyToDevice ( const void *  data,
size_t  count 
)
inlineprivate
Here is the caller graph for this function:

◆ convertAndCopyToDevice()

void FCPlugin::convertAndCopyToDevice ( void *&  deviceWeights,
const nvinfer1::Weights &  weights 
)
inlineprivate
Here is the call graph for this function:
Here is the caller graph for this function:

◆ convertAndCopyToBuffer()

void FCPlugin::convertAndCopyToBuffer ( char *&  buffer,
const nvinfer1::Weights &  weights 
)
inlineprivate
Here is the call graph for this function:
Here is the caller graph for this function:

◆ deserializeToDevice()

void FCPlugin::deserializeToDevice ( const char *&  hostBuffer,
void *&  deviceWeights,
size_t  size 
)
inlineprivate
Here is the call graph for this function:
Here is the caller graph for this function:

◆ getTensorRTVersion()

virtual int32_t nvinfer1::IPluginExt::getTensorRTVersion ( ) const
inlinevirtualinherited

Return the API version with which this plugin was built.

Do not override this method as it is used by the TensorRT library to maintain backwards-compatibility with plugins.

◆ configureWithFormat() [2/2]

virtual void nvinfer1::IPluginExt::configureWithFormat ( const Dims *  inputDims,
int32_t  nbInputs,
const Dims *  outputDims,
int32_t  nbOutputs,
DataType  type,
PluginFormat  format,
int32_t  maxBatchSize 
)
pure virtualinherited

Configure the layer.

This function is called by the builder prior to initialize(). It provides an opportunity for the layer to make algorithm choices on the basis of its weights, dimensions, and maximum batch size.

Parameters
inputDims — The input tensor dimensions.
nbInputs — The number of inputs.
outputDims — The output tensor dimensions.
nbOutputs — The number of outputs.
type — The data type selected for the engine.
format — The format selected for the engine.
maxBatchSize — The maximum batch size.

The dimensions passed here do not include the outermost batch size (i.e. for 2-D image networks, they will be 3-dimensional CHW dimensions).

Warning
DataType::kBOOL not supported.

◆ configure()

void nvinfer1::IPluginExt::configure ( const Dims ,
int32_t  ,
const Dims ,
int32_t  ,
int32_t   
)
inlineprotectedvirtualinherited

Derived classes should not implement this.

In a C++11 API it would be override final.

Implements nvinfer1::IPlugin.

◆ getOutputDimensions() [2/2]

virtual Dims nvinfer1::IPlugin::getOutputDimensions ( int32_t  index,
const Dims *  inputs,
int32_t  nbInputDims 
)
pure virtualinherited

Get the dimension of an output tensor.

Parameters
index — The index of the output tensor.
inputs — The input tensors.
nbInputDims — The number of input tensors.

This function is called by the implementations of INetworkDefinition and IBuilder. In particular, it is called prior to any call to initialize().

◆ getWorkspaceSize() [2/2]

virtual size_t nvinfer1::IPlugin::getWorkspaceSize ( int32_t  maxBatchSize) const
pure virtualinherited

Find the workspace size required by the layer.

This function is called during engine startup, after initialize(). The workspace size returned should be sufficient for any batch size up to the maximum.

Returns
The workspace size.

◆ enqueue() [2/2]

virtual int32_t nvinfer1::IPlugin::enqueue ( int32_t  batchSize,
const void *const *  inputs,
void **  outputs,
void *  workspace,
cudaStream_t  stream 
)
pure virtualinherited

Execute the layer.

Parameters
batchSize — The number of inputs in the batch.
inputs — The memory for the input tensors.
outputs — The memory for the output tensors.
workspace — Workspace for execution.
stream — The stream in which to execute the kernels.
Returns
0 for success, else non-zero (which will cause engine termination).

Member Data Documentation

◆ mNbOutputChannels

int FCPlugin::mNbOutputChannels
private

◆ mNbInputChannels

int FCPlugin::mNbInputChannels
private

◆ mKernelWeights

nvinfer1::Weights FCPlugin::mKernelWeights
private

◆ mBiasWeights

nvinfer1::Weights FCPlugin::mBiasWeights
private

◆ mDataType

nvinfer1::DataType FCPlugin::mDataType {nvinfer1::DataType::kFLOAT}
private

◆ mDeviceKernel

void* FCPlugin::mDeviceKernel {nullptr}
private

◆ mDeviceBias

void* FCPlugin::mDeviceBias {nullptr}
private

◆ mCudnn

cudnnHandle_t FCPlugin::mCudnn
private

◆ mCublas

cublasHandle_t FCPlugin::mCublas
private

◆ mSrcDescriptor

cudnnTensorDescriptor_t FCPlugin::mSrcDescriptor
private

◆ mDstDescriptor

cudnnTensorDescriptor_t FCPlugin::mDstDescriptor
private

The documentation for this class was generated from the following file: