pytorch  1.8.2
About: PyTorch provides Tensor computation (like NumPy) with strong GPU acceleration and Deep Neural Networks (in Python) built on a tape-based autograd system. LTS (Long Term Support) release.
  Fossies Dox: pytorch-1.8.2.tar.gz  ("unofficial" and yet experimental doxygen-generated source code documentation)  

caffe2.python.models.seq2seq.seq2seq_util Namespace Reference

Classes

class  LSTMWithAttentionDecoder
 

Functions

def gen_vocab (corpus, unk_threshold)
 
def get_numberized_sentence (sentence, vocab)
 
def rnn_unidirectional_layer (model, inputs, input_lengths, input_size, num_units, dropout_keep_prob, forward_only, return_sequence_output, return_final_state, scope=None)
 
def rnn_bidirectional_layer (model, inputs, input_lengths, input_size, num_units, dropout_keep_prob, forward_only, return_sequence_output, return_final_state, scope=None)
 
def build_embeddings (model, vocab_size, embedding_size, name, freeze_embeddings)
 
def get_layer_scope (scope, layer_type, i)
 
def build_embedding_encoder (model, encoder_params, num_decoder_layers, inputs, input_lengths, vocab_size, embeddings, embedding_size, use_attention, num_gpus=0, forward_only=False, scope=None)
 
def build_initial_rnn_decoder_states (model, encoder_units_per_layer, decoder_units_per_layer, final_encoder_hidden_states, final_encoder_cell_states, use_attention)
 
def build_embedding_decoder (model, decoder_layer_configs, inputs, input_lengths, encoder_lengths, encoder_outputs, weighted_encoder_outputs, final_encoder_hidden_states, final_encoder_cell_states, encoder_units_per_layer, vocab_size, embeddings, embedding_size, attention_type, forward_only, num_gpus=0, scope=None)
 
def output_projection (model, decoder_outputs, decoder_output_size, target_vocab_size, decoder_softmax_size)
 

Variables

int PAD_ID = 0
 
string PAD = '<PAD>'
 
int GO_ID = 1
 
string GO = '<GO>'
 
int EOS_ID = 2
 
string EOS = '<EOS>'
 
int UNK_ID = 3
 
string UNK = '<UNK>'
 

Function Documentation

◆ build_embedding_decoder()

def caffe2.python.models.seq2seq.seq2seq_util.build_embedding_decoder (   model,
  decoder_layer_configs,
  inputs,
  input_lengths,
  encoder_lengths,
  encoder_outputs,
  weighted_encoder_outputs,
  final_encoder_hidden_states,
  final_encoder_cell_states,
  encoder_units_per_layer,
  vocab_size,
  embeddings,
  embedding_size,
  attention_type,
  forward_only,
  num_gpus = 0,
  scope = None 
)

◆ build_embedding_encoder()

def caffe2.python.models.seq2seq.seq2seq_util.build_embedding_encoder (   model,
  encoder_params,
  num_decoder_layers,
  inputs,
  input_lengths,
  vocab_size,
  embeddings,
  embedding_size,
  use_attention,
  num_gpus = 0,
  forward_only = False,
  scope = None 
)

◆ build_embeddings()

def caffe2.python.models.seq2seq.seq2seq_util.build_embeddings (   model,
  vocab_size,
  embedding_size,
  name,
  freeze_embeddings 
)

◆ build_initial_rnn_decoder_states()

def caffe2.python.models.seq2seq.seq2seq_util.build_initial_rnn_decoder_states (   model,
  encoder_units_per_layer,
  decoder_units_per_layer,
  final_encoder_hidden_states,
  final_encoder_cell_states,
  use_attention 
)

◆ gen_vocab()

def caffe2.python.models.seq2seq.seq2seq_util.gen_vocab (   corpus,
  unk_threshold 
)

◆ get_layer_scope()

def caffe2.python.models.seq2seq.seq2seq_util.get_layer_scope (   scope,
  layer_type,
  i 
)

◆ get_numberized_sentence()

def caffe2.python.models.seq2seq.seq2seq_util.get_numberized_sentence (   sentence,
  vocab 
)

◆ output_projection()

def caffe2.python.models.seq2seq.seq2seq_util.output_projection (   model,
  decoder_outputs,
  decoder_output_size,
  target_vocab_size,
  decoder_softmax_size 
)

◆ rnn_bidirectional_layer()

def caffe2.python.models.seq2seq.seq2seq_util.rnn_bidirectional_layer (   model,
  inputs,
  input_lengths,
  input_size,
  num_units,
  dropout_keep_prob,
  forward_only,
  return_sequence_output,
  return_final_state,
  scope = None 
)

◆ rnn_unidirectional_layer()

def caffe2.python.models.seq2seq.seq2seq_util.rnn_unidirectional_layer (   model,
  inputs,
  input_lengths,
  input_size,
  num_units,
  dropout_keep_prob,
  forward_only,
  return_sequence_output,
  return_final_state,
  scope = None 
)

Variable Documentation

◆ EOS

string caffe2.python.models.seq2seq.seq2seq_util.EOS = '<EOS>'

Definition at line 22 of file seq2seq_util.py.

◆ EOS_ID

int caffe2.python.models.seq2seq.seq2seq_util.EOS_ID = 2

Definition at line 21 of file seq2seq_util.py.

◆ GO

string caffe2.python.models.seq2seq.seq2seq_util.GO = '<GO>'

Definition at line 20 of file seq2seq_util.py.

◆ GO_ID

int caffe2.python.models.seq2seq.seq2seq_util.GO_ID = 1

Definition at line 19 of file seq2seq_util.py.

◆ PAD

string caffe2.python.models.seq2seq.seq2seq_util.PAD = '<PAD>'

Definition at line 18 of file seq2seq_util.py.

◆ PAD_ID

int caffe2.python.models.seq2seq.seq2seq_util.PAD_ID = 0

Definition at line 17 of file seq2seq_util.py.

◆ UNK

string caffe2.python.models.seq2seq.seq2seq_util.UNK = '<UNK>'

Definition at line 24 of file seq2seq_util.py.

◆ UNK_ID

int caffe2.python.models.seq2seq.seq2seq_util.UNK_ID = 3

Definition at line 23 of file seq2seq_util.py.