|
def __init__(self, tacotron2)
|
def decode(self, decoder_input, in_attention_hidden, in_attention_cell, in_decoder_hidden, in_decoder_cell, in_attention_weights, in_attention_weights_cum, in_attention_context, memory, processed_memory, mask)
|
def forward(self, decoder_input, attention_hidden, attention_cell, decoder_hidden, decoder_cell, attention_weights, attention_weights_cum, attention_context, memory, processed_memory, mask)
|
◆ __init__()

def export_tacotron2_onnx.DecoderIter.__init__(self, tacotron2)
◆ decode()

def export_tacotron2_onnx.DecoderIter.decode(self, decoder_input, in_attention_hidden, in_attention_cell, in_decoder_hidden, in_decoder_cell, in_attention_weights, in_attention_weights_cum, in_attention_context, memory, processed_memory, mask)
◆ forward()

def export_tacotron2_onnx.DecoderIter.forward(self, decoder_input, attention_hidden, attention_cell, decoder_hidden, decoder_cell, attention_weights, attention_weights_cum, attention_context, memory, processed_memory, mask)
◆ tacotron2
export_tacotron2_onnx.DecoderIter.tacotron2
◆ p_attention_dropout
export_tacotron2_onnx.DecoderIter.p_attention_dropout
◆ p_decoder_dropout
export_tacotron2_onnx.DecoderIter.p_decoder_dropout
◆ prenet
export_tacotron2_onnx.DecoderIter.prenet
◆ attention_rnn
export_tacotron2_onnx.DecoderIter.attention_rnn
◆ attention_layer
export_tacotron2_onnx.DecoderIter.attention_layer
◆ decoder_rnn
export_tacotron2_onnx.DecoderIter.decoder_rnn
◆ linear_projection
export_tacotron2_onnx.DecoderIter.linear_projection
◆ gate_layer
export_tacotron2_onnx.DecoderIter.gate_layer
The documentation for this class was generated from the following file: