import typing as tp
import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from fireredtts.modules.flow.utils import make_pad_mask


class MultiHeadedAttention(nn.Module):
    """Multi-Head Attention layer.

    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.
    """

    def __init__(self,
                 n_head: int,
                 n_feat: int,
                 dropout_rate: float,
                 key_bias: bool = True):
        """Construct a MultiHeadedAttention object."""
        super().__init__()
        assert n_feat % n_head == 0
        # We assume d_v always equals d_k
        self.d_k = n_feat // n_head
        self.h = n_head
        self.linear_q = nn.Linear(n_feat, n_feat)
        self.linear_k = nn.Linear(n_feat, n_feat, bias=key_bias)
        self.linear_v = nn.Linear(n_feat, n_feat)
        self.linear_out = nn.Linear(n_feat, n_feat)
        self.dropout = nn.Dropout(p=dropout_rate)

    def forward_qkv(
        self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
    ) -> tp.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Transform query, key and value.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).

        Returns:
            torch.Tensor: Transformed query tensor, size
                (#batch, n_head, time1, d_k).
            torch.Tensor: Transformed key tensor, size
                (#batch, n_head, time2, d_k).
            torch.Tensor: Transformed value tensor, size
                (#batch, n_head, time2, d_k).
        """
        n_batch = query.size(0)
        q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
        k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
        v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
        q = q.transpose(1, 2)  # (batch, head, time1, d_k)
        k = k.transpose(1, 2)  # (batch, head, time2, d_k)
        v = v.transpose(1, 2)  # (batch, head, time2, d_k)
        return q, k, v

    def forward_attention(
        self,
        value: torch.Tensor,
        scores: torch.Tensor,
        mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool)
    ) -> torch.Tensor:
        """Compute attention context vector.

        Args:
            value (torch.Tensor): Transformed value, size
                (#batch, n_head, time2, d_k).
            scores (torch.Tensor): Attention score, size
                (#batch, n_head, time1, time2).
            mask (torch.Tensor): Mask, size (#batch, 1, time2) or
                (#batch, time1, time2), (0, 0, 0) means fake mask.

        Returns:
            torch.Tensor: Transformed value (#batch, time1, d_model)
                weighted by the attention score (#batch, time1, time2).
        """
        n_batch = value.size(0)
        # NOTE(xcsong): When will `if mask.size(2) > 0` be True?
        #   1. onnx(16/4) [WHY? Because we feed real cache & real mask for the
        #      1st chunk to ease the onnx export.]
        #   2. pytorch training
        if mask.size(2) > 0:  # time2 > 0
            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            # For last chunk, time2 might be larger than scores.size(-1)
            mask = mask[:, :, :, :scores.size(-1)]  # (batch, 1, *, time2)
            scores = scores.masked_fill(mask, -float('inf'))
            attn = torch.softmax(scores, dim=-1).masked_fill(
                mask, 0.0)  # (batch, head, time1, time2)
        # NOTE(xcsong): When will `if mask.size(2) > 0` be False?
        #   1. onnx(16/-1, -1/-1, 16/0)
        #   2. jit (16/-1, -1/-1, 16/0, 16/4)
        else:
            attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)

        p_attn = self.dropout(attn)
        x = torch.matmul(p_attn, value)  # (batch, head, time1, d_k)
        x = (x.transpose(1, 2).contiguous().view(n_batch, -1,
                                                 self.h * self.d_k)
             )  # (batch, time1, d_model)

        return self.linear_out(x)  # (batch, time1, d_model)

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
        pos_emb: torch.Tensor = torch.empty(0),
        cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
    ) -> tp.Tuple[torch.Tensor, torch.Tensor]:
        """Compute scaled dot product attention.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).
                1. When applying cross attention between decoder and encoder,
                   the batch padding mask for input is in (#batch, 1, T) shape.
                2. When applying self attention of encoder,
                   the mask is in (#batch, T, T) shape.
            cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`

        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
            torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`
        """
        q, k, v = self.forward_qkv(query, key, value)

        # NOTE(xcsong):
        #   when export onnx model, for 1st chunk, we feed
        #       cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
        #       or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).
        #       In all modes, `if cache.size(0) > 0` will always be `True`
        #       and we will always do splitting and
        #       concatenation (this will simplify onnx export). Note that
        #       it's OK to concat & split zero-shaped tensors (see code below).
        #   when export jit model, for 1st chunk, we always feed
        #       cache(0, 0, 0, 0) since jit supports dynamic if-branch.
        #   >>> a = torch.ones((1, 2, 0, 4))
        #   >>> b = torch.ones((1, 2, 3, 4))
        #   >>> c = torch.cat((a, b), dim=2)
        #   >>> torch.equal(b, c)        # True
        #   >>> d = torch.split(a, 2, dim=-1)
        #   >>> torch.equal(d[0], d[1])  # True
        if cache.size(0) > 0:
            key_cache, value_cache = torch.split(cache,
                                                 cache.size(-1) // 2,
                                                 dim=-1)
            k = torch.cat([key_cache, k], dim=2)
            v = torch.cat([value_cache, v], dim=2)
        # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's
        #   non-trivial to calculate `next_cache_start` here.
        new_cache = torch.cat((k, v), dim=-1)

        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
        return self.forward_attention(v, scores, mask), new_cache
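

# A quick shape-tracing sketch of MultiHeadedAttention (illustrative only,
# using arbitrary dummy sizes; the empty default mask/cache mean "no masking,
# no cache"):
# >>> mha = MultiHeadedAttention(n_head=8, n_feat=512, dropout_rate=0.0)
# >>> q = torch.randn(2, 10, 512)   # (batch, time1, size)
# >>> kv = torch.randn(2, 20, 512)  # (batch, time2, size)
# >>> out, new_cache = mha(q, kv, kv)
# >>> out.shape        # torch.Size([2, 10, 512])
# >>> new_cache.shape  # torch.Size([2, 8, 20, 128]), i.e. K|V concatenated, d_k * 2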


class RelPositionMultiHeadedAttention(MultiHeadedAttention):
    """Multi-Head Attention layer with relative position encoding.

    Paper: https://arxiv.org/abs/1901.02860

    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.
    """

    def __init__(self,
                 n_head: int,
                 n_feat: int,
                 dropout_rate: float,
                 key_bias: bool = True):
        """Construct a RelPositionMultiHeadedAttention object."""
        super().__init__(n_head, n_feat, dropout_rate, key_bias)
        # linear transformation for positional encoding
        self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
        # these two learnable biases are used in matrix c and matrix d
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
        self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
        torch.nn.init.xavier_uniform_(self.pos_bias_u)
        torch.nn.init.xavier_uniform_(self.pos_bias_v)

    def rel_shift(self, x):
        """Compute relative positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1),
                where time1 is the length of the query vector.

        Returns:
            torch.Tensor: Output tensor.
        """
        zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=-1)

        x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
        x = x_padded[:, :, 1:].view_as(x)[
            :, :, :, : x.size(-1) // 2 + 1
        ]  # only keep the positions from 0 to time2
        return x
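
    # A quick shape sketch of rel_shift (illustrative only): for a query of
    # length time1, the (2*time1-1)-wide relative-position scores are realigned
    # per query position and only the positions from 0 to time2 are kept.
    # >>> attn = RelPositionMultiHeadedAttention(n_head=8, n_feat=512, dropout_rate=0.0)
    # >>> scores = torch.randn(2, 8, 10, 19)  # (batch, head, time1, 2*time1-1)
    # >>> attn.rel_shift(scores).shape        # torch.Size([2, 8, 10, 10])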

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
        pos_emb: torch.Tensor = torch.empty(0),
        cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
    ) -> tp.Tuple[torch.Tensor, torch.Tensor]:
        """Compute 'Scaled Dot Product Attention' with rel. positional encoding.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2), (0, 0, 0) means fake mask.
            pos_emb (torch.Tensor): Positional embedding tensor
                (#batch, time2, size).
            cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`

        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
            torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`
        """
        q, k, v = self.forward_qkv(query, key, value)
        q = q.transpose(1, 2)  # (batch, time1, head, d_k)

        # NOTE(xcsong):
        #   when export onnx model, for 1st chunk, we feed
        #       cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
        #       or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).
        #       In all modes, `if cache.size(0) > 0` will always be `True`
        #       and we will always do splitting and
        #       concatenation (this will simplify onnx export). Note that
        #       it's OK to concat & split zero-shaped tensors (see code below).
        #   when export jit model, for 1st chunk, we always feed
        #       cache(0, 0, 0, 0) since jit supports dynamic if-branch.
        #   >>> a = torch.ones((1, 2, 0, 4))
        #   >>> b = torch.ones((1, 2, 3, 4))
        #   >>> c = torch.cat((a, b), dim=2)
        #   >>> torch.equal(b, c)        # True
        #   >>> d = torch.split(a, 2, dim=-1)
        #   >>> torch.equal(d[0], d[1])  # True
        if cache.size(0) > 0:
            key_cache, value_cache = torch.split(cache,
                                                 cache.size(-1) // 2,
                                                 dim=-1)
            k = torch.cat([key_cache, k], dim=2)
            v = torch.cat([value_cache, v], dim=2)
        # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's
        #   non-trivial to calculate `next_cache_start` here.
        new_cache = torch.cat((k, v), dim=-1)

        n_batch_pos = pos_emb.size(0)
        p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
        p = p.transpose(1, 2)  # (batch, head, time1, d_k)

        # (batch, head, time1, d_k)
        q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
        # (batch, head, time1, d_k)
        q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)

        # compute attention score
        # first compute matrix a and matrix c
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        # (batch, head, time1, time2)
        matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))

        # compute matrix b and matrix d
        # (batch, head, time1, time2)
        matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
        # NOTE(Xiang Lyu): Keep rel_shift since espnet rel_pos_emb is used
        if matrix_ac.shape != matrix_bd.shape:
            matrix_bd = self.rel_shift(matrix_bd)

        scores = (matrix_ac + matrix_bd) / math.sqrt(
            self.d_k)  # (batch, head, time1, time2)

        return self.forward_attention(v, scores, mask), new_cache
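

# Relative-position attention expects a matching pos_emb; a minimal sketch
# (illustrative only), pairing it with the EspnetRelPositionalEncoding defined
# further below:
# >>> rel_attn = RelPositionMultiHeadedAttention(n_head=8, n_feat=512, dropout_rate=0.0)
# >>> pos_enc = EspnetRelPositionalEncoding(d_model=512, dropout_rate=0.0)
# >>> x = torch.randn(2, 10, 512)
# >>> x_scaled, pos_emb = pos_enc(x)  # pos_emb: (1, 2*10-1, 512)
# >>> out, _ = rel_attn(x_scaled, x_scaled, x_scaled, pos_emb=pos_emb)
# >>> out.shape  # torch.Size([2, 10, 512])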


class PositionwiseFeedForward(torch.nn.Module):
    """Positionwise feed forward layer.

    The feed-forward transform is applied to each position of the sequence.
    The output dim is the same as the input dim.

    Args:
        idim (int): Input dimension.
        hidden_units (int): The number of hidden units.
        dropout_rate (float): Dropout rate.
        activation (torch.nn.Module): Activation function.
    """

    def __init__(
        self,
        idim: int,
        hidden_units: int,
        dropout_rate: float,
        activation: torch.nn.Module = torch.nn.ReLU(),
    ):
        """Construct a PositionwiseFeedForward object."""
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = torch.nn.Linear(idim, hidden_units)
        self.activation = activation
        self.dropout = torch.nn.Dropout(dropout_rate)
        self.w_2 = torch.nn.Linear(hidden_units, idim)

    def forward(self, xs: torch.Tensor) -> torch.Tensor:
        """Forward function.

        Args:
            xs: input tensor (B, L, D)

        Returns:
            output tensor, (B, L, D)
        """
        return self.w_2(self.dropout(self.activation(self.w_1(xs))))
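

# A one-line shape check (illustrative only): the FFN preserves (B, L, D).
# >>> ffn = PositionwiseFeedForward(idim=512, hidden_units=2048, dropout_rate=0.0)
# >>> ffn(torch.randn(2, 10, 512)).shape  # torch.Size([2, 10, 512])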


class ConformerDecoderLayer(nn.Module):
    """Conformer decoder layer module.

    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`
            instance can be used as the argument.
        src_attn (torch.nn.Module): Cross-attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`
            instance can be used as the argument.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward` instance can be used as the argument.
        feed_forward_macaron (torch.nn.Module): Additional feed-forward module
            instance.
            `PositionwiseFeedForward` instance can be used as the argument.
        conv_module (torch.nn.Module): Convolution module instance.
            `ConvolutionModule` instance can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool):
            True: use layer_norm before each sub-block.
            False: use layer_norm after each sub-block.
    """

    def __init__(
        self,
        size: int,
        self_attn: torch.nn.Module,
        src_attn: tp.Optional[torch.nn.Module] = None,
        feed_forward: tp.Optional[nn.Module] = None,
        feed_forward_macaron: tp.Optional[nn.Module] = None,
        conv_module: tp.Optional[nn.Module] = None,
        dropout_rate: float = 0.1,
        normalize_before: bool = True,
    ):
        """Construct a ConformerDecoderLayer object."""
        super().__init__()
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.feed_forward_macaron = feed_forward_macaron
        self.conv_module = conv_module
        self.norm_ff = nn.LayerNorm(size, eps=1e-5)  # for the FFN module
        self.norm_mha = nn.LayerNorm(size, eps=1e-5)  # for the MHA module
        if src_attn is not None:
            self.norm_mha2 = nn.LayerNorm(size, eps=1e-5)  # for the MHA module (src_attn)
        if feed_forward_macaron is not None:
            self.norm_ff_macaron = nn.LayerNorm(size, eps=1e-5)
            self.ff_scale = 0.5
        else:
            self.ff_scale = 1.0
        if self.conv_module is not None:
            self.norm_conv = nn.LayerNorm(size, eps=1e-5)  # for the CNN module
        self.norm_final = nn.LayerNorm(
            size, eps=1e-5)  # for the final output of the block
        self.dropout = nn.Dropout(dropout_rate)
        self.size = size
        self.normalize_before = normalize_before

    def forward(
        self,
        x: torch.Tensor,
        mask: torch.Tensor,
        # src-attention
        memory: torch.Tensor,
        memory_mask: torch.Tensor,
        pos_emb: torch.Tensor,
        mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
        att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
        cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
    ) -> tp.Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute encoded features.

        Args:
            x (torch.Tensor): (#batch, time, size)
            mask (torch.Tensor): Mask tensor for the input (#batch, time, time),
                (0, 0, 0) means fake mask.
            memory (torch.Tensor): Encoder output used as key/value for the
                cross-attention module (#batch, time2, size).
            memory_mask (torch.Tensor): Padding mask of the encoder output
                (#batch, 1, time2).
            pos_emb (torch.Tensor): positional encoding, must not be None
                for ConformerEncoderLayer.
            mask_pad (torch.Tensor): batch padding mask used for conv module.
                (#batch, 1, time), (0, 0, 0) means fake mask.
            att_cache (torch.Tensor): Cache tensor of the KEY & VALUE
                (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.
            cnn_cache (torch.Tensor): Convolution cache in conformer layer
                (#batch=1, size, cache_t2)

        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, time, time).
            torch.Tensor: att_cache tensor,
                (#batch=1, head, cache_t1 + time, d_k * 2).
            torch.Tensor: cnn_cache tensor (#batch, size, cache_t2).
        """
        # whether to use macaron style
        if self.feed_forward_macaron is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_ff_macaron(x)
            x = residual + self.ff_scale * self.dropout(
                self.feed_forward_macaron(x))
            if not self.normalize_before:
                x = self.norm_ff_macaron(x)

        # multi-headed self-attention module
        residual = x
        if self.normalize_before:
            x = self.norm_mha(x)
        x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb,
                                              att_cache)
        x = residual + self.dropout(x_att)
        if not self.normalize_before:
            x = self.norm_mha(x)

        # multi-headed cross-attention module
        if self.src_attn is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_mha2(x)
            x_att, _ = self.src_attn(x, memory, memory, memory_mask)
            x = residual + self.dropout(x_att)
            if not self.normalize_before:
                x = self.norm_mha2(x)

        # convolution module
        # Fake new cnn cache here, and then change it in conv_module
        new_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
        if self.conv_module is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_conv(x)
            x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache)
            x = residual + self.dropout(x)
            if not self.normalize_before:
                x = self.norm_conv(x)

        # feed forward module
        residual = x
        if self.normalize_before:
            x = self.norm_ff(x)
        x = residual + self.ff_scale * self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm_ff(x)

        if self.conv_module is not None:
            x = self.norm_final(x)

        return x, mask, new_att_cache, new_cnn_cache
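

# Wiring sketch for a single layer (illustrative only): relative-position
# self-attention, cross-attention over an encoder memory, and a SiLU FFN,
# mirroring how ConformerDecoderV2 below assembles its blocks.
# >>> layer = ConformerDecoderLayer(
# ...     size=512,
# ...     self_attn=RelPositionMultiHeadedAttention(8, 512, 0.0),
# ...     src_attn=MultiHeadedAttention(8, 512, 0.0),
# ...     feed_forward=PositionwiseFeedForward(512, 2048, 0.0, torch.nn.SiLU()),
# ... )
# >>> x = torch.randn(2, 10, 512)
# >>> memory = torch.randn(2, 25, 512)
# >>> mask = torch.ones(2, 1, 10, dtype=torch.bool)
# >>> memory_mask = torch.ones(2, 1, 25, dtype=torch.bool)
# >>> _, pos_emb = EspnetRelPositionalEncoding(512, 0.0)(x)
# >>> y, *_ = layer(x, mask, memory, memory_mask, pos_emb)
# >>> y.shape  # torch.Size([2, 10, 512])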


class EspnetRelPositionalEncoding(torch.nn.Module):
    """Relative positional encoding module (new implementation).

    Details can be found in https://github.com/espnet/espnet/pull/2816.
    See: Appendix B in https://arxiv.org/abs/1901.02860

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        """Construct an EspnetRelPositionalEncoding object."""
        super(EspnetRelPositionalEncoding, self).__init__()
        self.d_model = d_model
        self.xscale = math.sqrt(self.d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.pe = None
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))

    def extend_pe(self, x):
        """Reset the positional encodings."""
        if self.pe is not None:
            # self.pe contains both positive and negative parts
            # the length of self.pe is 2 * input_len - 1
            if self.pe.size(1) >= x.size(1) * 2 - 1:
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        # Suppose `i` is the position of the query vector and `j` the position
        # of the key vector. We use positive relative positions when keys are
        # to the left (i>j) and negative relative positions otherwise (i<j).
        pe_positive = torch.zeros(x.size(1), self.d_model)
        pe_negative = torch.zeros(x.size(1), self.d_model)
        position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.d_model)
        )
        pe_positive[:, 0::2] = torch.sin(position * div_term)
        pe_positive[:, 1::2] = torch.cos(position * div_term)
        pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
        pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)

        # Reverse the order of positive indices and concat both positive and
        # negative indices. This is used to support the shifting trick
        # as in https://arxiv.org/abs/1901.02860
        pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
        pe_negative = pe_negative[1:].unsqueeze(0)
        pe = torch.cat([pe_positive, pe_negative], dim=1)
        self.pe = pe.to(device=x.device, dtype=x.dtype)

    def forward(self, x: torch.Tensor, offset: tp.Union[int, torch.Tensor] = 0):
        """Add positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
        """
        self.extend_pe(x)
        x = x * self.xscale
        pos_emb = self.position_encoding(size=x.size(1), offset=offset)
        return self.dropout(x), self.dropout(pos_emb)

    def position_encoding(self,
                          offset: tp.Union[int, torch.Tensor],
                          size: int) -> torch.Tensor:
        """Get the positional encoding in a streaming fashion.

        Attention!!!!! In the non-streaming case, dropout is applied only once
        at the whole-utterance level, but in a streaming scenario this function
        will be called several times with increasing input size, so the dropout
        will be applied several times.

        Args:
            offset (int or torch.Tensor): start offset
            size (int): required size of position encoding

        Returns:
            torch.Tensor: Corresponding encoding
        """
        pos_emb = self.pe[
            :,
            self.pe.size(1) // 2 - size + 1 : self.pe.size(1) // 2 + size,
        ]
        return pos_emb
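

# The cached table covers both directions; for an input of length T, the slice
# handed to the attention module has length 2*T-1 (a quick check, illustrative
# only):
# >>> enc = EspnetRelPositionalEncoding(d_model=512, dropout_rate=0.0, max_len=5000)
# >>> enc.pe.shape  # torch.Size([1, 9999, 512]), i.e. 2*max_len-1 positions
# >>> x, pos_emb = enc(torch.randn(2, 10, 512))
# >>> pos_emb.shape  # torch.Size([1, 19, 512])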


class LinearNoSubsampling(torch.nn.Module):
    """Linearly transform the input without subsampling.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a LinearNoSubsampling object."""
        super().__init__()
        self.out = torch.nn.Sequential(
            torch.nn.Linear(idim, odim),
            torch.nn.LayerNorm(odim, eps=1e-5),
            torch.nn.Dropout(dropout_rate),
        )
        self.pos_enc = pos_enc_class
        self.right_context = 0
        self.subsampling_rate = 1

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: tp.Union[int, torch.Tensor] = 0
    ) -> tp.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Input x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: linear input tensor (#batch, time', odim),
                where time' == time.
            torch.Tensor: positional encoding.
            torch.Tensor: linear input mask (#batch, 1, time'),
                where time' == time.
        """
        x = self.out(x)
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask


class ConformerDecoderV2(nn.Module):

    def __init__(self,
                 input_size: int = 512,
                 output_size: int = 512,
                 attention_heads: int = 8,
                 linear_units: int = 2048,
                 num_blocks: int = 6,
                 dropout_rate: float = 0.01,
                 srcattention_start_index: int = 0,
                 srcattention_end_index: int = 2,
                 attention_dropout_rate: float = 0.01,
                 positional_dropout_rate: float = 0.01,
                 key_bias: bool = True,
                 normalize_before: bool = True,
                 ):
        super().__init__()
        self.num_blocks = num_blocks
        self.normalize_before = normalize_before
        self.output_size = output_size

        self.embed = LinearNoSubsampling(
            input_size,
            output_size,
            dropout_rate,
            EspnetRelPositionalEncoding(output_size, positional_dropout_rate),
        )

        self.encoders = torch.nn.ModuleList()
        for i in range(self.num_blocks):
            # construct src attention
            if srcattention_start_index <= i <= srcattention_end_index:
                srcattention_layer = MultiHeadedAttention(
                    attention_heads,
                    output_size,
                    attention_dropout_rate,
                    key_bias
                )
            else:
                srcattention_layer = None
            # construct self attention
            selfattention_layer = RelPositionMultiHeadedAttention(
                attention_heads,
                output_size,
                attention_dropout_rate,
                key_bias
            )
            # construct ffn
            ffn_layer = PositionwiseFeedForward(
                output_size,
                linear_units,
                dropout_rate,
                torch.nn.SiLU()
            )
            self.encoders.append(
                ConformerDecoderLayer(
                    output_size,
                    selfattention_layer,
                    srcattention_layer,
                    ffn_layer,
                    None,
                    None,
                    dropout_rate,
                    normalize_before=normalize_before
                )
            )
        self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5)

    def forward_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
                       memory: torch.Tensor, memory_masks: torch.Tensor,
                       pos_emb: torch.Tensor, mask_pad: torch.Tensor) -> torch.Tensor:
        for layer in self.encoders:
            xs, chunk_masks, _, _ = layer(xs, chunk_masks, memory, memory_masks, pos_emb, mask_pad)
        return xs

    def forward(self,
                xs: torch.Tensor,
                xs_lens: torch.Tensor,
                memory: torch.Tensor,
                memory_lens: torch.Tensor,
                ):
        T = xs.size(1)
        masks = ~make_pad_mask(xs_lens, T).unsqueeze(1)  # (B, 1, T)
        T2 = memory.size(1)
        memory_masks = ~make_pad_mask(memory_lens, T2).unsqueeze(1)  # (B, 1, T2)

        xs, pos_emb, masks = self.embed(xs, masks)
        xs = self.forward_layers(xs, masks, memory, memory_masks, pos_emb, masks)
        if self.normalize_before:
            xs = self.after_norm(xs)
        return xs, masks
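

# End-to-end smoke-test sketch for ConformerDecoderV2 (illustrative only;
# make_pad_mask is assumed to return True on padded positions, matching its
# use above):
# >>> decoder = ConformerDecoderV2(input_size=512, output_size=512, num_blocks=2)
# >>> xs = torch.randn(2, 10, 512)      # decoder-side features
# >>> xs_lens = torch.tensor([10, 7])   # valid lengths per utterance
# >>> memory = torch.randn(2, 25, 512)  # encoder output to cross-attend over
# >>> memory_lens = torch.tensor([25, 20])
# >>> out, masks = decoder(xs, xs_lens, memory, memory_lens)
# >>> out.shape    # torch.Size([2, 10, 512])
# >>> masks.shape  # torch.Size([2, 1, 10])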