TensorRT  7.2.1.6
NVIDIA TensorRT
nvinfer1::plugin::InstanceNormalizationPlugin Class Reference (final, abstract)
Inheritance diagram for nvinfer1::plugin::InstanceNormalizationPlugin:
Collaboration diagram for nvinfer1::plugin::InstanceNormalizationPlugin:

Public Member Functions

 InstanceNormalizationPlugin (float epsilon, nvinfer1::Weights const &scale, nvinfer1::Weights const &bias)
 
 InstanceNormalizationPlugin (float epsilon, const std::vector< float > &scale, const std::vector< float > &bias)
 
 InstanceNormalizationPlugin (void const *serialData, size_t serialLength)
 
 InstanceNormalizationPlugin ()=delete
 
 ~InstanceNormalizationPlugin () override
 
int getNbOutputs () const override
 Get the number of outputs from the layer. More...
 
DimsExprs getOutputDimensions (int outputIndex, const nvinfer1::DimsExprs *inputs, int nbInputs, nvinfer1::IExprBuilder &exprBuilder) override
 
int initialize () override
 Initialize the layer for execution. More...
 
void terminate () override
 Release resources acquired during plugin layer initialization. More...
 
size_t getWorkspaceSize (const nvinfer1::PluginTensorDesc *inputs, int nbInputs, const nvinfer1::PluginTensorDesc *outputs, int nbOutputs) const override
 
int enqueue (const nvinfer1::PluginTensorDesc *inputDesc, const nvinfer1::PluginTensorDesc *outputDesc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) override
 Execute the layer. More...
 
size_t getSerializationSize () const override
 Find the size of the serialization buffer required. More...
 
void serialize (void *buffer) const override
 Serialize the layer. More...
 
bool supportsFormatCombination (int pos, const nvinfer1::PluginTensorDesc *inOut, int nbInputs, int nbOutputs) override
 
const char * getPluginType () const override
 Return the plugin type. More...
 
const char * getPluginVersion () const override
 Return the plugin version. More...
 
void destroy () override
 Destroy the plugin object. More...
 
nvinfer1::IPluginV2DynamicExt * clone () const override
 Clone the plugin object. More...
 
void setPluginNamespace (const char *pluginNamespace) override
 Set the namespace that this plugin object belongs to. More...
 
const char * getPluginNamespace () const override
 Return the namespace of the plugin object. More...
 
DataType getOutputDataType (int index, const nvinfer1::DataType *inputTypes, int nbInputs) const override
 
void attachToContext (cudnnContext *cudnn, cublasContext *cublas, nvinfer1::IGpuAllocator *allocator) override
 Attach the plugin object to an execution context and grant the plugin the access to some context resource. More...
 
void detachFromContext () override
 Detach the plugin object from its execution context. More...
 
void configurePlugin (const nvinfer1::DynamicPluginTensorDesc *in, int nbInputs, const nvinfer1::DynamicPluginTensorDesc *out, int nbOutputs) override
 
virtual DimsExprs getOutputDimensions (int32_t outputIndex, const DimsExprs *inputs, int32_t nbInputs, IExprBuilder &exprBuilder)=0
 Get expressions for computing dimensions of an output tensor from dimensions of the input tensors. More...
 
virtual Dims getOutputDimensions (int32_t index, const Dims *inputs, int32_t nbInputDims)=0
 Get the dimension of an output tensor. More...
 
virtual bool supportsFormatCombination (int32_t pos, const PluginTensorDesc *inOut, int32_t nbInputs, int32_t nbOutputs)=0
 Return true if plugin supports the format and datatype for the input/output indexed by pos. More...
 
virtual void configurePlugin (const DynamicPluginTensorDesc *in, int32_t nbInputs, const DynamicPluginTensorDesc *out, int32_t nbOutputs)=0
 Configure the layer. More...
 
virtual void configurePlugin (const Dims *inputDims, int32_t nbInputs, const Dims *outputDims, int32_t nbOutputs, const DataType *inputTypes, const DataType *outputTypes, const bool *inputIsBroadcast, const bool *outputIsBroadcast, PluginFormat floatFormat, int32_t maxBatchSize)=0
 Configure the layer with input and output data types. More...
 
virtual size_t getWorkspaceSize (const PluginTensorDesc *inputs, int32_t nbInputs, const PluginTensorDesc *outputs, int32_t nbOutputs) const =0
 Find the workspace size required by the layer. More...
 
virtual size_t getWorkspaceSize (int32_t maxBatchSize) const =0
 Find the workspace size required by the layer. More...
 
virtual int32_t enqueue (int32_t batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream)=0
 Execute the layer. More...
 
virtual nvinfer1::DataType getOutputDataType (int32_t index, const nvinfer1::DataType *inputTypes, int32_t nbInputs) const =0
 Return the DataType of the plugin output at the requested index. More...
 
virtual bool isOutputBroadcastAcrossBatch (int32_t outputIndex, const bool *inputIsBroadcasted, int32_t nbInputs) const =0
 Return true if output tensor is broadcast across a batch. More...
 
virtual bool canBroadcastInputAcrossBatch (int32_t inputIndex) const =0
 Return true if plugin can use input that is broadcast across batch without replication. More...
 
virtual bool supportsFormat (DataType type, PluginFormat format) const =0
 Check format support. More...
 

Static Public Attributes

static constexpr int32_t kFORMAT_COMBINATION_LIMIT = 100
 Limit on number of format combinations accepted. More...
 

Protected Member Functions

int32_t getTensorRTVersion () const
 Return the API version with which this plugin was built. More...
 
 __attribute__ ((deprecated)) Dims getOutputDimensions(int32_t
 Derived classes should not implement this. More...
 
 __attribute__ ((deprecated)) bool isOutputBroadcastAcrossBatch(int32_t
 Derived classes should not implement this. More...
 
 __attribute__ ((deprecated)) bool canBroadcastInputAcrossBatch(int32_t) const
 Derived classes should not implement this. More...
 
 __attribute__ ((deprecated)) bool supportsFormat(DataType
 Derived classes should not implement this. More...
 
 __attribute__ ((deprecated)) void configurePlugin(const Dims *
 Derived classes should not implement this. More...
 
void configureWithFormat (const Dims *, int32_t, const Dims *, int32_t, DataType, PluginFormat, int32_t)
 Derived classes should not implement this. More...
 

Protected Attributes

const Dims int32_t
 
const bool const int32_t
 
 int32_t
 
const PluginFormat
 
const Dims const DataType const DataType const bool const bool PluginFormat
 

Private Attributes

float _epsilon
 
int _nchan
 
std::vector< float > _h_scale
 
std::vector< float > _h_bias
 
float * _d_scale
 
float * _d_bias
 
size_t _d_bytes
 
cudnnHandle_t _cudnn_handle
 
cudnnTensorDescriptor_t _x_desc
 
cudnnTensorDescriptor_t _y_desc
 
cudnnTensorDescriptor_t _b_desc
 
std::string mPluginNamespace
 

Constructor & Destructor Documentation

◆ InstanceNormalizationPlugin() [1/4]

InstanceNormalizationPlugin::InstanceNormalizationPlugin ( float  epsilon,
nvinfer1::Weights const &  scale,
nvinfer1::Weights const &  bias 
)

◆ InstanceNormalizationPlugin() [2/4]

InstanceNormalizationPlugin::InstanceNormalizationPlugin ( float  epsilon,
const std::vector< float > &  scale,
const std::vector< float > &  bias 
)

◆ InstanceNormalizationPlugin() [3/4]

InstanceNormalizationPlugin::InstanceNormalizationPlugin ( void const *  serialData,
size_t  serialLength 
)

◆ InstanceNormalizationPlugin() [4/4]

nvinfer1::plugin::InstanceNormalizationPlugin::InstanceNormalizationPlugin ( )
delete

◆ ~InstanceNormalizationPlugin()

InstanceNormalizationPlugin::~InstanceNormalizationPlugin ( )
override
Here is the call graph for this function:

Member Function Documentation

◆ getNbOutputs()

int InstanceNormalizationPlugin::getNbOutputs ( ) const
overridevirtual

Get the number of outputs from the layer.

Returns
The number of outputs.

This function is called by the implementations of INetworkDefinition and IBuilder. In particular, it is called prior to any call to initialize().

Implements nvinfer1::IPluginV2.

◆ getOutputDimensions() [1/3]

DimsExprs InstanceNormalizationPlugin::getOutputDimensions ( int  outputIndex,
const nvinfer1::DimsExprs inputs,
int  nbInputs,
nvinfer1::IExprBuilder exprBuilder 
)
override

◆ initialize()

int InstanceNormalizationPlugin::initialize ( )
overridevirtual

Initialize the layer for execution.

This is called when the engine is created.

Returns
0 for success, else non-zero (which will cause engine termination).

Implements nvinfer1::IPluginV2.

◆ terminate()

void InstanceNormalizationPlugin::terminate ( )
overridevirtual

Release resources acquired during plugin layer initialization.

This is called when the engine is destroyed.

See also
initialize()

Implements nvinfer1::IPluginV2.

Here is the caller graph for this function:

◆ getWorkspaceSize() [1/3]

size_t InstanceNormalizationPlugin::getWorkspaceSize ( const nvinfer1::PluginTensorDesc inputs,
int  nbInputs,
const nvinfer1::PluginTensorDesc outputs,
int  nbOutputs 
) const
override

◆ enqueue() [1/2]

int InstanceNormalizationPlugin::enqueue ( const nvinfer1::PluginTensorDesc inputDesc,
const nvinfer1::PluginTensorDesc outputDesc,
const void *const *  inputs,
void *const *  outputs,
void *  workspace,
cudaStream_t  stream 
)
overridevirtual

Execute the layer.

Parameters
inputDesc — how to interpret the memory for the input tensors.
outputDesc — how to interpret the memory for the output tensors.
inputs — The memory for the input tensors.
outputs — The memory for the output tensors.
workspace — Workspace for execution.
stream — The stream in which to execute the kernels.
Returns
0 for success, else non-zero (which will cause engine termination).

Implements nvinfer1::IPluginV2DynamicExt.

◆ getSerializationSize()

size_t InstanceNormalizationPlugin::getSerializationSize ( ) const
overridevirtual

Find the size of the serialization buffer required.

Returns
The size of the serialization buffer.

Implements nvinfer1::IPluginV2.

◆ serialize()

void InstanceNormalizationPlugin::serialize ( void *  buffer) const
overridevirtual

Serialize the layer.

Parameters
buffer — A pointer to a buffer to serialize data. Size of buffer must be equal to value returned by getSerializationSize.
See also
getSerializationSize()

Implements nvinfer1::IPluginV2.

◆ supportsFormatCombination() [1/2]

bool InstanceNormalizationPlugin::supportsFormatCombination ( int  pos,
const nvinfer1::PluginTensorDesc inOut,
int  nbInputs,
int  nbOutputs 
)
override

◆ getPluginType()

const char * InstanceNormalizationPlugin::getPluginType ( ) const
overridevirtual

Return the plugin type.

Should match the plugin name returned by the corresponding plugin creator

See also
IPluginCreator::getPluginName()

Implements nvinfer1::IPluginV2.

◆ getPluginVersion()

const char * InstanceNormalizationPlugin::getPluginVersion ( ) const
overridevirtual

Return the plugin version.

Should match the plugin version returned by the corresponding plugin creator

See also
IPluginCreator::getPluginVersion()

Implements nvinfer1::IPluginV2.

◆ destroy()

void InstanceNormalizationPlugin::destroy ( )
overridevirtual

Destroy the plugin object.

This will be called when the network, builder or engine is destroyed.

Implements nvinfer1::IPluginV2.

◆ clone()

IPluginV2DynamicExt * InstanceNormalizationPlugin::clone ( ) const
overridevirtual

Clone the plugin object.

This copies over internal plugin parameters as well and returns a new plugin object with these parameters. If the source plugin is pre-configured with configurePlugin(), the returned object should also be pre-configured. The returned object should allow attachToContext() with a new execution context. Cloned plugin objects can share the same per-engine immutable resource (e.g. weights) with the source object (e.g. via ref-counting) to avoid duplication.

Implements nvinfer1::IPluginV2DynamicExt.

◆ setPluginNamespace()

void InstanceNormalizationPlugin::setPluginNamespace ( const char *  pluginNamespace)
overridevirtual

Set the namespace that this plugin object belongs to.

Ideally, all plugin objects from the same plugin library should have the same namespace.

Implements nvinfer1::IPluginV2.

Here is the caller graph for this function:

◆ getPluginNamespace()

const char * InstanceNormalizationPlugin::getPluginNamespace ( ) const
overridevirtual

Return the namespace of the plugin object.

Implements nvinfer1::IPluginV2.

◆ getOutputDataType() [1/2]

nvinfer1::DataType InstanceNormalizationPlugin::getOutputDataType ( int  index,
const nvinfer1::DataType inputTypes,
int  nbInputs 
) const
override

◆ attachToContext()

void InstanceNormalizationPlugin::attachToContext ( cudnnContext *  ,
cublasContext *  ,
nvinfer1::IGpuAllocator *  
)
overridevirtual

Attach the plugin object to an execution context and grant the plugin the access to some context resource.

Parameters
cudnn — The cudnn context handle of the execution context
cublas — The cublas context handle of the execution context
allocator — The allocator used by the execution context

This function is called automatically for each plugin when a new execution context is created. If the plugin needs per-context resource, it can be allocated here. The plugin can also get context-owned CUDNN and CUBLAS context here.

Reimplemented from nvinfer1::IPluginV2Ext.

◆ detachFromContext()

void InstanceNormalizationPlugin::detachFromContext ( )
overridevirtual

Detach the plugin object from its execution context.

This function is called automatically for each plugin when a execution context is destroyed. If the plugin owns per-context resource, it can be released here.

Reimplemented from nvinfer1::IPluginV2Ext.

◆ configurePlugin() [1/3]

void InstanceNormalizationPlugin::configurePlugin ( const nvinfer1::DynamicPluginTensorDesc in,
int  nbInputs,
const nvinfer1::DynamicPluginTensorDesc out,
int  nbOutputs 
)
override

◆ getOutputDimensions() [2/3]

virtual DimsExprs nvinfer1::IPluginV2DynamicExt::getOutputDimensions ( int32_t  outputIndex,
const DimsExprs inputs,
int32_t  nbInputs,
IExprBuilder exprBuilder 
)
pure virtualinherited

Get expressions for computing dimensions of an output tensor from dimensions of the input tensors.

Parameters
outputIndex — The index of the output tensor
inputs — Expressions for dimensions of the input tensors
nbInputs — The number of input tensors
exprBuilder — Object for generating new expressions

This function is called by the implementations of IBuilder during analysis of the network.

Example #1: A plugin has a single output that transposes the last two dimensions of the plugin's single input. The body of the override of getOutputDimensions can be:

DimsExprs output(inputs[0]);
std::swap(output.d[output.nbDims-1], output.d[output.nbDims-2]);
return output;

Example #2: A plugin concatenates its two inputs along the first dimension. The body of the override of getOutputDimensions can be:

DimsExprs output(inputs[0]);
output.d[0] = exprBuilder.operation(DimensionOperation::kSUM, *inputs[0].d[0], *inputs[1].d[0]);
return output;

◆ getOutputDimensions() [3/3]

virtual Dims nvinfer1::IPluginV2::getOutputDimensions ( int32_t  index,
const Dims inputs,
int32_t  nbInputDims 
)
pure virtualinherited

Get the dimension of an output tensor.

Parameters
index — The index of the output tensor.
inputs — The input tensors.
nbInputDims — The number of input tensors.

This function is called by the implementations of INetworkDefinition and IBuilder. In particular, it is called prior to any call to initialize().

◆ supportsFormatCombination() [2/2]

virtual bool nvinfer1::IPluginV2DynamicExt::supportsFormatCombination ( int32_t  pos,
const PluginTensorDesc inOut,
int32_t  nbInputs,
int32_t  nbOutputs 
)
pure virtualinherited

Return true if plugin supports the format and datatype for the input/output indexed by pos.

For this method inputs are numbered 0..(nbInputs-1) and outputs are numbered nbInputs..(nbInputs+nbOutputs-1). Using this numbering, pos is an index into InOut, where 0 <= pos < nbInputs+nbOutputs-1.

TensorRT invokes this method to ask if the input/output indexed by pos supports the format/datatype specified by inOut[pos].format and inOut[pos].type. The override should return true if that format/datatype at inOut[pos] are supported by the plugin. If support is conditional on other input/output formats/datatypes, the plugin can make its result conditional on the formats/datatypes in inOut[0..pos-1], which will be set to values that the plugin supports. The override should not inspect inOut[pos+1..nbInputs+nbOutputs-1], which will have invalid values. In other words, the decision for pos must be based on inOut[0..pos] only.

Some examples:

  • A definition for a plugin that supports only FP16 NCHW:
      return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kHALF;
    
  • A definition for a plugin that supports only FP16 NCHW for its two inputs, and FP32 NCHW for its single output:
      return inOut[pos].format == TensorFormat::kLINEAR && (inOut[pos].type == (pos < 2 ? DataType::kHALF :
      DataType::kFLOAT));
    
  • A definition for a "polymorphic" plugin with two inputs and one output that supports any format or type, but the inputs and output must have the same format and type:
      return pos == 0 || (inOut[pos].format == inOut[0].format && inOut[pos].type == inOut[0].type);
    

Warning: TensorRT will stop asking for format combinations once it has found kFORMAT_COMBINATION_LIMIT of them.

◆ configurePlugin() [2/3]

virtual void nvinfer1::IPluginV2DynamicExt::configurePlugin ( const DynamicPluginTensorDesc in,
int32_t  nbInputs,
const DynamicPluginTensorDesc out,
int32_t  nbOutputs 
)
pure virtualinherited

Configure the layer.

This function is called by the builder prior to initialize(). It provides an opportunity for the layer to make algorithm choices on the basis of bounds on the input and output tensors, and the target value.

This function is also called once when the resource requirements are changed based on the optimization profiles.

Parameters
in — The input tensors attributes that are used for configuration.
nbInputs — Number of input tensors.
out — The output tensors attributes that are used for configuration.
nbOutputs — Number of output tensors.

◆ configurePlugin() [3/3]

virtual void nvinfer1::IPluginV2Ext::configurePlugin ( const Dims inputDims,
int32_t  nbInputs,
const Dims outputDims,
int32_t  nbOutputs,
const DataType inputTypes,
const DataType outputTypes,
const bool *  inputIsBroadcast,
const bool *  outputIsBroadcast,
PluginFormat  floatFormat,
int32_t  maxBatchSize 
)
pure virtualinherited

Configure the layer with input and output data types.

This function is called by the builder prior to initialize(). It provides an opportunity for the layer to make algorithm choices on the basis of its weights, dimensions, data types and maximum batch size.

Parameters
inputDims — The input tensor dimensions.
nbInputs — The number of inputs.
outputDims — The output tensor dimensions.
nbOutputs — The number of outputs.
inputTypes — The data types selected for the plugin inputs.
outputTypes — The data types selected for the plugin outputs.
inputIsBroadcast — True for each input that the plugin must broadcast across the batch.
outputIsBroadcast — True for each output that TensorRT will broadcast across the batch.
floatFormat — The format selected for the engine for the floating point inputs/outputs.
maxBatchSize — The maximum batch size.

The dimensions passed here do not include the outermost batch size (i.e. for 2-D image networks, they will be 3-dimensional CHW dimensions). When inputIsBroadcast or outputIsBroadcast is true, the outermost batch size for that input or output should be treated as if it is one. inputIsBroadcast[i] is true only if the input is semantically broadcast across the batch and canBroadcastInputAcrossBatch(i) returned true. outputIsBroadcast[i] is true only if isOutputBroadcastAcrossBatch(i) returned true.

Warning
for the floatFormat field, the values PluginFormat::kCHW4, PluginFormat::kCHW16, and PluginFormat::kCHW32 will not be passed in, this is to keep backward compatibility with TensorRT 5.x series. Use PluginV2IOExt or PluginV2DynamicExt for other PluginFormats.

◆ getWorkspaceSize() [2/3]

virtual size_t nvinfer1::IPluginV2DynamicExt::getWorkspaceSize ( const PluginTensorDesc inputs,
int32_t  nbInputs,
const PluginTensorDesc outputs,
int32_t  nbOutputs 
) const
pure virtualinherited

Find the workspace size required by the layer.

This function is called after the plugin is configured, and possibly during execution. The result should be a sufficient workspace size to deal with inputs and outputs of the given size or any smaller problem.

Returns
The workspace size.

◆ getWorkspaceSize() [3/3]

virtual size_t nvinfer1::IPluginV2::getWorkspaceSize ( int32_t  maxBatchSize) const
pure virtualinherited

Find the workspace size required by the layer.

This function is called during engine startup, after initialize(). The workspace size returned should be sufficient for any batch size up to the maximum.

Returns
The workspace size.

◆ enqueue() [2/2]

virtual int32_t nvinfer1::IPluginV2::enqueue ( int32_t  batchSize,
const void *const *  inputs,
void **  outputs,
void *  workspace,
cudaStream_t  stream 
)
pure virtualinherited

Execute the layer.

Parameters
batchSize — The number of inputs in the batch.
inputs — The memory for the input tensors.
outputs — The memory for the output tensors.
workspace — Workspace for execution.
stream — The stream in which to execute the kernels.
Returns
0 for success, else non-zero (which will cause engine termination).

◆ getTensorRTVersion()

int32_t nvinfer1::IPluginV2DynamicExt::getTensorRTVersion ( ) const
inlineprotectedvirtualinherited

Return the API version with which this plugin was built.

Do not override this method as it is used by the TensorRT library to maintain backwards-compatibility with plugins.

Reimplemented from nvinfer1::IPluginV2.

◆ __attribute__() [1/5]

nvinfer1::IPluginV2DynamicExt::__attribute__ ( (deprecated)  )
protectedinherited

Derived classes should not implement this.

In a C++11 API it would be override final.

Instead, derived classes should override the overload of getOutputDimensions that returns DimsExprs.

Deprecated:
Deprecated interface will be removed in TensorRT 8.0.

◆ __attribute__() [2/5]

nvinfer1::IPluginV2DynamicExt::__attribute__ ( (deprecated)  )
protectedinherited

Derived classes should not implement this.

In a C++11 API it would be override final.

This method is not used because with dynamic shapes there is no implicit batch dimension to broadcast across.

Deprecated:
Deprecated interface will be removed in TensorRT 8.0.

◆ __attribute__() [3/5]

nvinfer1::IPluginV2DynamicExt::__attribute__ ( (deprecated)  ) const
inlineprotectedinherited

Derived classes should not implement this.

In a C++11 API it would be override final.

This method is not used because with dynamic shapes there is no implicit batch dimension to broadcast across.

Deprecated:
Deprecated interface will be removed in TensorRT 8.0.

◆ __attribute__() [4/5]

nvinfer1::IPluginV2DynamicExt::__attribute__ ( (deprecated)  )
protectedinherited

Derived classes should not implement this.

In a C++11 API it would be override final.

This method is not used because it does not allow a plugin to specify mixed formats.

Instead, derived classes should override supportsFormatCombination, which allows plugins to express mixed formats.

Deprecated:
Deprecated interface will be removed in TensorRT 8.0.

◆ __attribute__() [5/5]

nvinfer1::IPluginV2DynamicExt::__attribute__ ( (deprecated)  ) const
protectedinherited

Derived classes should not implement this.

In a C++11 API it would be override final.

This method is not used because tensors with dynamic shapes do not have an implicit batch dimension, input dimensions might be variable, and outputs might have different floating-point formats.

Instead, derived classes should override the overload of configurePlugin that takes pointers to DynamicPluginTensorDesc.

Deprecated:
Deprecated interface will be removed in TensorRT 8.0.

◆ getOutputDataType() [2/2]

virtual nvinfer1::DataType nvinfer1::IPluginV2Ext::getOutputDataType ( int32_t  index,
const nvinfer1::DataType inputTypes,
int32_t  nbInputs 
) const
pure virtualinherited

Return the DataType of the plugin output at the requested index.

The default behavior should be to return the type of the first input, or DataType::kFLOAT if the layer has no inputs. The returned data type must have a format that is supported by the plugin.

See also
supportsFormat()
Warning
DataType::kBOOL not supported.

◆ isOutputBroadcastAcrossBatch()

virtual bool nvinfer1::IPluginV2Ext::isOutputBroadcastAcrossBatch ( int32_t  outputIndex,
const bool *  inputIsBroadcasted,
int32_t  nbInputs 
) const
pure virtualinherited

Return true if output tensor is broadcast across a batch.

Parameters
outputIndex — The index of the output
inputIsBroadcasted — The ith element is true if the tensor for the ith input is broadcast across a batch.
nbInputs — The number of inputs

The values in inputIsBroadcasted refer to broadcasting at the semantic level, i.e. are unaffected by whether method canBroadcastInputAcrossBatch requests physical replication of the values.

◆ canBroadcastInputAcrossBatch()

virtual bool nvinfer1::IPluginV2Ext::canBroadcastInputAcrossBatch ( int32_t  inputIndex) const
pure virtualinherited

Return true if plugin can use input that is broadcast across batch without replication.

Parameters
inputIndex — Index of input that could be broadcast.

For each input whose tensor is semantically broadcast across a batch, TensorRT calls this method before calling configurePlugin. If canBroadcastInputAcrossBatch returns true, TensorRT will not replicate the input tensor; i.e., there will be a single copy that the plugin should share across the batch. If it returns false, TensorRT will replicate the input tensor so that it appears like a non-broadcasted tensor.

This method is called only for inputs that can be broadcast.

◆ configureWithFormat()

void nvinfer1::IPluginV2Ext::configureWithFormat ( const Dims ,
int32_t  ,
const Dims ,
int32_t  ,
DataType  ,
PluginFormat  ,
int32_t   
)
inlineprotectedvirtualinherited

Derived classes should not implement this.

In a C++11 API it would be override final.

Implements nvinfer1::IPluginV2.

◆ supportsFormat()

virtual bool nvinfer1::IPluginV2::supportsFormat ( DataType  type,
PluginFormat  format 
) const
pure virtualinherited

Check format support.

Parameters
type — DataType requested.
format — PluginFormat requested.
Returns
true if the plugin supports the type-format combination.

This function is called by the implementations of INetworkDefinition, IBuilder, and safe::ICudaEngine/ICudaEngine. In particular, it is called when creating an engine and when deserializing an engine.

Warning
for the format field, the values PluginFormat::kCHW4, PluginFormat::kCHW16, and PluginFormat::kCHW32 will not be passed in, this is to keep backward compatibility with TensorRT 5.x series. Use PluginV2IOExt or PluginV2DynamicExt for other PluginFormats.
DataType::kBOOL not supported.

Implemented in nvinfer1::plugin::FlattenConcat, nvinfer1::plugin::ProposalPlugin, nvinfer1::plugin::CropAndResizePlugin, nvinfer1::plugin::SpecialSlice, nvinfer1::plugin::ProposalLayer, nvinfer1::plugin::BatchTilePlugin, nvinfer1::plugin::CoordConvACPlugin, nvinfer1::plugin::GenerateDetection, nvinfer1::plugin::MultilevelCropAndResize, nvinfer1::plugin::PyramidROIAlign, nvinfer1::plugin::DetectionLayer, nvinfer1::plugin::MultilevelProposeROI, nvinfer1::plugin::ResizeNearest, nvinfer1::plugin::LReLU, nvinfer1::plugin::Normalize, nvinfer1::plugin::DetectionOutput, nvinfer1::plugin::RPROIPlugin, nvinfer1::plugin::PriorBox, nvinfer1::plugin::Region, nvinfer1::plugin::Reorg, nvinfer1::plugin::GridAnchorGenerator, and nvinfer1::plugin::BatchedNMSPlugin.

Member Data Documentation

◆ _epsilon

float nvinfer1::plugin::InstanceNormalizationPlugin::_epsilon
private

◆ _nchan

int nvinfer1::plugin::InstanceNormalizationPlugin::_nchan
private

◆ _h_scale

std::vector<float> nvinfer1::plugin::InstanceNormalizationPlugin::_h_scale
private

◆ _h_bias

std::vector<float> nvinfer1::plugin::InstanceNormalizationPlugin::_h_bias
private

◆ _d_scale

float* nvinfer1::plugin::InstanceNormalizationPlugin::_d_scale
private

◆ _d_bias

float* nvinfer1::plugin::InstanceNormalizationPlugin::_d_bias
private

◆ _d_bytes

size_t nvinfer1::plugin::InstanceNormalizationPlugin::_d_bytes
private

◆ _cudnn_handle

cudnnHandle_t nvinfer1::plugin::InstanceNormalizationPlugin::_cudnn_handle
private

◆ _x_desc

cudnnTensorDescriptor_t nvinfer1::plugin::InstanceNormalizationPlugin::_x_desc
private

◆ _y_desc

cudnnTensorDescriptor_t nvinfer1::plugin::InstanceNormalizationPlugin::_y_desc
private

◆ _b_desc

cudnnTensorDescriptor_t nvinfer1::plugin::InstanceNormalizationPlugin::_b_desc
private

◆ mPluginNamespace

std::string nvinfer1::plugin::InstanceNormalizationPlugin::mPluginNamespace
private

◆ kFORMAT_COMBINATION_LIMIT

constexpr int32_t nvinfer1::IPluginV2DynamicExt::kFORMAT_COMBINATION_LIMIT = 100
staticconstexprinherited

Limit on number of format combinations accepted.

◆ int32_t [1/3]

const Dims nvinfer1::IPluginV2DynamicExt::int32_t
protectedinherited
Initial value:
{
return Dims{-1, {}, {}}

◆ int32_t [2/3]

const bool const nvinfer1::IPluginV2DynamicExt::int32_t
protectedinherited
Initial value:
{
return false

◆ int32_t [3/3]

nvinfer1::IPluginV2DynamicExt::int32_t
protectedinherited

◆ PluginFormat [1/2]

const nvinfer1::IPluginV2DynamicExt::PluginFormat
protectedinherited
Initial value:
{
return false

◆ PluginFormat [2/2]

const Dims const DataType const DataType const bool const bool nvinfer1::IPluginV2DynamicExt::PluginFormat
protectedinherited

The documentation for this class was generated from the following files: