Mirror of https://github.com/HumanAIGC/lite-avatar.git (synced 2026-02-05 18:09:20 +08:00)

Commit: add files
funasr_local/lm/__init__.py (new file, 0 lines)

funasr_local/lm/abs_model.py (new file, 158 lines)
@@ -0,0 +1,158 @@
from abc import ABC
from abc import abstractmethod
from typing import Tuple

import torch

from funasr_local.modules.scorers.scorer_interface import BatchScorerInterface
from typing import Dict
from typing import Optional
from typing import Tuple

import torch
import torch.nn.functional as F
from typeguard import check_argument_types

from funasr_local.modules.nets_utils import make_pad_mask
from funasr_local.torch_utils.device_funcs import force_gatherable
from funasr_local.train.abs_espnet_model import AbsESPnetModel


class AbsLM(torch.nn.Module, BatchScorerInterface, ABC):
    """The abstract LM class.

    To share the loss calculation among different models,
    we use the delegate pattern here:
    the instance of this class should be passed to "LanguageModel".

    >>> from funasr_local.lm.abs_model import AbsLM
    >>> lm = AbsLM()
    >>> model = LanguageModel(lm=lm)

    This "model" is one of the mediator objects for the "Task" class.

    """

    @abstractmethod
    def forward(
        self, input: torch.Tensor, hidden: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        raise NotImplementedError


class LanguageModel(AbsESPnetModel):
    def __init__(self, lm: AbsLM, vocab_size: int, ignore_id: int = 0):
        # assert check_argument_types()
        super().__init__()
        self.lm = lm
        self.sos = 1
        self.eos = 2

        # ignore_id may be assumed as 0, shared with CTC-blank symbol for ASR.
        self.ignore_id = ignore_id

    def nll(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        max_length: Optional[int] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute negative log likelihood (nll).

        Normally, this function is called in batchify_nll.
        Args:
            text: (Batch, Length)
            text_lengths: (Batch,)
            max_length: int
        """
        batch_size = text.size(0)
        # For data parallel
        if max_length is None:
            text = text[:, : text_lengths.max()]
        else:
            text = text[:, :max_length]

        # 1. Create a sentence pair like '<sos> w1 w2 w3' and 'w1 w2 w3 <eos>'
        # text: (Batch, Length) -> x, y: (Batch, Length + 1)
        x = F.pad(text, [1, 0], "constant", self.sos)
        t = F.pad(text, [0, 1], "constant", self.ignore_id)
        for i, l in enumerate(text_lengths):
            t[i, l] = self.eos
        x_lengths = text_lengths + 1

        # 2. Forward Language model
        # x: (Batch, Length) -> y: (Batch, Length, NVocab)
        y, _ = self.lm(x, None)

        # 3. Calc negative log likelihood
        # nll: (BxL,)
        nll = F.cross_entropy(y.view(-1, y.shape[-1]), t.view(-1), reduction="none")
        # nll: (BxL,) -> (BxL,), with padded positions masked to 0.0
        if max_length is None:
            nll.masked_fill_(make_pad_mask(x_lengths).to(nll.device).view(-1), 0.0)
        else:
            nll.masked_fill_(
                make_pad_mask(x_lengths, maxlen=max_length + 1).to(nll.device).view(-1),
                0.0,
            )
        # nll: (BxL,) -> (B, L)
        nll = nll.view(batch_size, -1)
        return nll, x_lengths

    def batchify_nll(
        self, text: torch.Tensor, text_lengths: torch.Tensor, batch_size: int = 100
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute negative log likelihood (nll) from a transformer language model.

        To avoid OOM, this function splits the input into batches,
        then calls nll for each batch and combines and returns the results.
        Args:
            text: (Batch, Length)
            text_lengths: (Batch,)
            batch_size: int, number of samples per batch when computing nll;
                        you may decrease this to avoid OOM or increase it for speed.

        """
        total_num = text.size(0)
        if total_num <= batch_size:
            nll, x_lengths = self.nll(text, text_lengths)
        else:
            nlls = []
            x_lengths = []
            max_length = text_lengths.max()

            start_idx = 0
            while True:
                end_idx = min(start_idx + batch_size, total_num)
                batch_text = text[start_idx:end_idx, :]
                batch_text_lengths = text_lengths[start_idx:end_idx]
                # batch_nll: [B * T]
                batch_nll, batch_x_lengths = self.nll(
                    batch_text, batch_text_lengths, max_length=max_length
                )
                nlls.append(batch_nll)
                x_lengths.append(batch_x_lengths)
                start_idx = end_idx
                if start_idx == total_num:
                    break
            nll = torch.cat(nlls)
            x_lengths = torch.cat(x_lengths)
        assert nll.size(0) == total_num
        assert x_lengths.size(0) == total_num
        return nll, x_lengths

    def forward(
        self, text: torch.Tensor, text_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        nll, y_lengths = self.nll(text, text_lengths)
        ntokens = y_lengths.sum()
        loss = nll.sum() / ntokens
        stats = dict(loss=loss.detach())

        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, ntokens), loss.device)
        return loss, stats, weight

    def collect_feats(
        self, text: torch.Tensor, text_lengths: torch.Tensor
    ) -> Dict[str, torch.Tensor]:
        return {}
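
For reference, a short sketch (not part of this commit; it only re-uses the sos=1 / eos=2 / ignore_id=0 convention above) of how step 1 of `nll` builds the teacher-forcing input/target pair with `F.pad`:

import torch
import torch.nn.functional as F

sos, eos, ignore_id = 1, 2, 0
text = torch.tensor([[5, 6, 7], [8, 9, 0]])    # second sentence padded with ignore_id
text_lengths = torch.tensor([3, 2])

x = F.pad(text, [1, 0], "constant", sos)        # input:  <sos> w1 w2 w3
t = F.pad(text, [0, 1], "constant", ignore_id)  # target: w1 w2 w3 <pad>
for i, l in enumerate(text_lengths):
    t[i, l] = eos                               # target: w1 w2 w3 <eos>

# x == [[1, 5, 6, 7], [1, 8, 9, 0]]
# t == [[5, 6, 7, 2], [8, 9, 2, 0]]  -> cross_entropy against t, padding masked to 0.0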
funasr_local/lm/espnet_model.py (new file, 131 lines)
@@ -0,0 +1,131 @@
from typing import Dict
from typing import Optional
from typing import Tuple

import torch
import torch.nn.functional as F
from typeguard import check_argument_types

from funasr_local.modules.nets_utils import make_pad_mask
from funasr_local.lm.abs_model import AbsLM
from funasr_local.torch_utils.device_funcs import force_gatherable
from funasr_local.train.abs_espnet_model import AbsESPnetModel


class ESPnetLanguageModel(AbsESPnetModel):
    def __init__(self, lm: AbsLM, vocab_size: int, ignore_id: int = 0):
        assert check_argument_types()
        super().__init__()
        self.lm = lm
        self.sos = 1
        self.eos = 2

        # ignore_id may be assumed as 0, shared with CTC-blank symbol for ASR.
        self.ignore_id = ignore_id

    def nll(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        max_length: Optional[int] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute negative log likelihood (nll).

        Normally, this function is called in batchify_nll.
        Args:
            text: (Batch, Length)
            text_lengths: (Batch,)
            max_length: int
        """
        batch_size = text.size(0)
        # For data parallel
        if max_length is None:
            text = text[:, : text_lengths.max()]
        else:
            text = text[:, :max_length]

        # 1. Create a sentence pair like '<sos> w1 w2 w3' and 'w1 w2 w3 <eos>'
        # text: (Batch, Length) -> x, y: (Batch, Length + 1)
        x = F.pad(text, [1, 0], "constant", self.eos)
        t = F.pad(text, [0, 1], "constant", self.ignore_id)
        for i, l in enumerate(text_lengths):
            t[i, l] = self.sos
        x_lengths = text_lengths + 1

        # 2. Forward Language model
        # x: (Batch, Length) -> y: (Batch, Length, NVocab)
        y, _ = self.lm(x, None)

        # 3. Calc negative log likelihood
        # nll: (BxL,)
        nll = F.cross_entropy(y.view(-1, y.shape[-1]), t.view(-1), reduction="none")
        # nll: (BxL,) -> (BxL,), with padded positions masked to 0.0
        if max_length is None:
            nll.masked_fill_(make_pad_mask(x_lengths).to(nll.device).view(-1), 0.0)
        else:
            nll.masked_fill_(
                make_pad_mask(x_lengths, maxlen=max_length + 1).to(nll.device).view(-1),
                0.0,
            )
        # nll: (BxL,) -> (B, L)
        nll = nll.view(batch_size, -1)
        return nll, x_lengths

    def batchify_nll(
        self, text: torch.Tensor, text_lengths: torch.Tensor, batch_size: int = 100
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute negative log likelihood (nll) from a transformer language model.

        To avoid OOM, this function splits the input into batches,
        then calls nll for each batch and combines and returns the results.
        Args:
            text: (Batch, Length)
            text_lengths: (Batch,)
            batch_size: int, number of samples per batch when computing nll;
                        you may decrease this to avoid OOM or increase it for speed.

        """
        total_num = text.size(0)
        if total_num <= batch_size:
            nll, x_lengths = self.nll(text, text_lengths)
        else:
            nlls = []
            x_lengths = []
            max_length = text_lengths.max()

            start_idx = 0
            while True:
                end_idx = min(start_idx + batch_size, total_num)
                batch_text = text[start_idx:end_idx, :]
                batch_text_lengths = text_lengths[start_idx:end_idx]
                # batch_nll: [B * T]
                batch_nll, batch_x_lengths = self.nll(
                    batch_text, batch_text_lengths, max_length=max_length
                )
                nlls.append(batch_nll)
                x_lengths.append(batch_x_lengths)
                start_idx = end_idx
                if start_idx == total_num:
                    break
            nll = torch.cat(nlls)
            x_lengths = torch.cat(x_lengths)
        assert nll.size(0) == total_num
        assert x_lengths.size(0) == total_num
        return nll, x_lengths

    def forward(
        self, text: torch.Tensor, text_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        nll, y_lengths = self.nll(text, text_lengths)
        ntokens = y_lengths.sum()
        loss = nll.sum() / ntokens
        stats = dict(loss=loss.detach())

        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, ntokens), loss.device)
        return loss, stats, weight

    def collect_feats(
        self, text: torch.Tensor, text_lengths: torch.Tensor
    ) -> Dict[str, torch.Tensor]:
        return {}
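
As a usage note (an illustrative sketch, not code from this commit): the per-token nll returned by `nll` / `batchify_nll` can be turned into corpus perplexity with the same token-count normalization that `forward` uses for the loss.

import torch

def perplexity(nll: torch.Tensor, x_lengths: torch.Tensor) -> float:
    # nll: (Batch, Length) per-token negative log likelihood, zeros on padding
    # x_lengths: (Batch,) number of scored tokens per sentence (text length + 1 for the end symbol)
    ntokens = x_lengths.sum()
    avg_nll = nll.sum() / ntokens        # same normalization as the training loss
    return torch.exp(avg_nll).item()

# hypothetical usage, assuming `model` is an ESPnetLanguageModel and the tensors exist:
# nll, x_lengths = model.batchify_nll(text, text_lengths, batch_size=32)
# print(perplexity(nll, x_lengths))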
funasr_local/lm/seq_rnn_lm.py (new file, 174 lines)
@@ -0,0 +1,174 @@
"""Sequential implementation of Recurrent Neural Network Language Model."""
from typing import Tuple
from typing import Union

import torch
import torch.nn as nn
from typeguard import check_argument_types

from funasr_local.lm.abs_model import AbsLM


class SequentialRNNLM(AbsLM):
    """Sequential RNNLM.

    See also:
        https://github.com/pytorch/examples/blob/4581968193699de14b56527296262dd76ab43557/word_language_model/model.py

    """

    def __init__(
        self,
        vocab_size: int,
        unit: int = 650,
        nhid: int = None,
        nlayers: int = 2,
        dropout_rate: float = 0.0,
        tie_weights: bool = False,
        rnn_type: str = "lstm",
        ignore_id: int = 0,
    ):
        assert check_argument_types()
        super().__init__()

        ninp = unit
        if nhid is None:
            nhid = unit
        rnn_type = rnn_type.upper()

        self.drop = nn.Dropout(dropout_rate)
        self.encoder = nn.Embedding(vocab_size, ninp, padding_idx=ignore_id)
        if rnn_type in ["LSTM", "GRU"]:
            rnn_class = getattr(nn, rnn_type)
            self.rnn = rnn_class(
                ninp, nhid, nlayers, dropout=dropout_rate, batch_first=True
            )
        else:
            try:
                nonlinearity = {"RNN_TANH": "tanh", "RNN_RELU": "relu"}[rnn_type]
            except KeyError:
                raise ValueError(
                    """An invalid option for `--model` was supplied,
                    options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']"""
                )
            self.rnn = nn.RNN(
                ninp,
                nhid,
                nlayers,
                nonlinearity=nonlinearity,
                dropout=dropout_rate,
                batch_first=True,
            )
        self.decoder = nn.Linear(nhid, vocab_size)

        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models"
        # (Press & Wolf 2016) https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers:
        # A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            if nhid != ninp:
                raise ValueError(
                    "When using the tied flag, nhid must be equal to emsize"
                )
            self.decoder.weight = self.encoder.weight

        self.rnn_type = rnn_type
        self.nhid = nhid
        self.nlayers = nlayers

    def zero_state(self):
        """Initialize LM state filled with zero values."""
        if isinstance(self.rnn, torch.nn.LSTM):
            h = torch.zeros((self.nlayers, self.nhid), dtype=torch.float)
            c = torch.zeros((self.nlayers, self.nhid), dtype=torch.float)
            state = h, c
        else:
            state = torch.zeros((self.nlayers, self.nhid), dtype=torch.float)

        return state

    def forward(
        self, input: torch.Tensor, hidden: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        emb = self.drop(self.encoder(input))
        output, hidden = self.rnn(emb, hidden)
        output = self.drop(output)
        decoded = self.decoder(
            output.contiguous().view(output.size(0) * output.size(1), output.size(2))
        )
        return (
            decoded.view(output.size(0), output.size(1), decoded.size(1)),
            hidden,
        )

    def score(
        self,
        y: torch.Tensor,
        state: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
        x: torch.Tensor,
    ) -> Tuple[torch.Tensor, Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]]:
        """Score new token.

        Args:
            y: 1D torch.int64 prefix tokens.
            state: Scorer state for prefix tokens.
            x: 2D encoder feature that generates ys.

        Returns:
            Tuple of
                torch.float32 scores for next token (n_vocab)
                and next state for ys

        """
        y, new_state = self(y[-1].view(1, 1), state)
        logp = y.log_softmax(dim=-1).view(-1)
        return logp, new_state

    def batch_score(
        self, ys: torch.Tensor, states: torch.Tensor, xs: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Score new token batch.

        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchified scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.

        """
        if states[0] is None:
            states = None
        elif isinstance(self.rnn, torch.nn.LSTM):
            # states: Batch x 2 x (Nlayers, Dim) -> 2 x (Nlayers, Batch, Dim)
            h = torch.stack([h for h, c in states], dim=1)
            c = torch.stack([c for h, c in states], dim=1)
            states = h, c
        else:
            # states: Batch x (Nlayers, Dim) -> (Nlayers, Batch, Dim)
            states = torch.stack(states, dim=1)

        ys, states = self(ys[:, -1:], states)
        # ys: (Batch, 1, Nvocab) -> (Batch, NVocab)
        assert ys.size(1) == 1, ys.shape
        ys = ys.squeeze(1)
        logp = ys.log_softmax(dim=-1)

        # state: Change to batch first
        if isinstance(self.rnn, torch.nn.LSTM):
            # h, c: (Nlayers, Batch, Dim)
            h, c = states
            # states: Batch x 2 x (Nlayers, Dim)
            states = [(h[:, i], c[:, i]) for i in range(h.size(1))]
        else:
            # states: (Nlayers, Batch, Dim) -> Batch x (Nlayers, Dim)
            states = [states[:, i] for i in range(states.size(1))]

        return logp, states
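
A minimal smoke-test sketch (not from this commit; the sizes are arbitrary and it assumes the vendored funasr_local package imports cleanly):

import torch
from funasr_local.lm.seq_rnn_lm import SequentialRNNLM

lm = SequentialRNNLM(vocab_size=1000, unit=64, nlayers=2, rnn_type="lstm")

ids = torch.randint(low=1, high=1000, size=(4, 7))  # (Batch, Length) token ids
logits, hidden = lm(ids, None)                      # hidden=None lets nn.LSTM start from zeros
assert logits.shape == (4, 7, 1000)                 # (Batch, Length, vocab_size)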
funasr_local/lm/transformer_lm.py (new file, 131 lines)
@@ -0,0 +1,131 @@
from typing import Any
from typing import List
from typing import Tuple

import torch
import torch.nn as nn

from funasr_local.modules.embedding import PositionalEncoding
from funasr_local.models.encoder.transformer_encoder import TransformerEncoder_s0 as Encoder
from funasr_local.modules.mask import subsequent_mask
from funasr_local.lm.abs_model import AbsLM


class TransformerLM(AbsLM):
    def __init__(
        self,
        vocab_size: int,
        pos_enc: str = None,
        embed_unit: int = 128,
        att_unit: int = 256,
        head: int = 2,
        unit: int = 1024,
        layer: int = 4,
        dropout_rate: float = 0.5,
    ):
        super().__init__()
        if pos_enc == "sinusoidal":
            pos_enc_class = PositionalEncoding
        elif pos_enc is None:

            def pos_enc_class(*args, **kwargs):
                return nn.Sequential()  # identity

        else:
            raise ValueError(f"unknown pos-enc option: {pos_enc}")

        self.embed = nn.Embedding(vocab_size, embed_unit)
        self.encoder = Encoder(
            idim=embed_unit,
            attention_dim=att_unit,
            attention_heads=head,
            linear_units=unit,
            num_blocks=layer,
            dropout_rate=dropout_rate,
            input_layer="linear",
            pos_enc_class=pos_enc_class,
        )
        self.decoder = nn.Linear(att_unit, vocab_size)

    def _target_mask(self, ys_in_pad):
        ys_mask = ys_in_pad != 0
        m = subsequent_mask(ys_mask.size(-1), device=ys_mask.device).unsqueeze(0)
        return ys_mask.unsqueeze(-2) & m

    def forward(self, input: torch.Tensor, hidden: None) -> Tuple[torch.Tensor, None]:
        """Compute LM output logits from input token id sequences.

        Args:
            input (torch.Tensor): Input ids. (batch, len)
            hidden: Unused; kept for the AbsLM interface and returned as None.

        """
        x = self.embed(input)
        mask = self._target_mask(input)
        h, _ = self.encoder(x, mask)
        y = self.decoder(h)
        return y, None

    def score(
        self, y: torch.Tensor, state: Any, x: torch.Tensor
    ) -> Tuple[torch.Tensor, Any]:
        """Score new token.

        Args:
            y (torch.Tensor): 1D torch.int64 prefix tokens.
            state: Scorer state for prefix tokens.
            x (torch.Tensor): encoder feature that generates ys.

        Returns:
            tuple[torch.Tensor, Any]: Tuple of
                torch.float32 scores for next token (vocab_size)
                and next state for ys

        """
        y = y.unsqueeze(0)
        h, _, cache = self.encoder.forward_one_step(
            self.embed(y), self._target_mask(y), cache=state
        )
        h = self.decoder(h[:, -1])
        logp = h.log_softmax(dim=-1).squeeze(0)
        return logp, cache

    def batch_score(
        self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
    ) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch.

        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchified scores for next token with shape of `(n_batch, vocab_size)`
                and next state list for ys.

        """
        # merge states
        n_batch = len(ys)
        n_layers = len(self.encoder.encoders)
        if states[0] is None:
            batch_state = None
        else:
            # transpose state of [batch, layer] into [layer, batch]
            batch_state = [
                torch.stack([states[b][i] for b in range(n_batch)])
                for i in range(n_layers)
            ]

        # batch decoding
        h, _, states = self.encoder.forward_one_step(
            self.embed(ys), self._target_mask(ys), cache=batch_state
        )
        h = self.decoder(h[:, -1])
        logp = h.log_softmax(dim=-1)

        # transpose state of [layer, batch] into [batch, layer]
        state_list = [[states[i][b] for i in range(n_layers)] for b in range(n_batch)]
        return logp, state_list
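
A similar illustrative sketch for TransformerLM (not from this commit; the hyper-parameters are arbitrary and it assumes funasr_local's encoder modules import cleanly):

import torch
from funasr_local.lm.transformer_lm import TransformerLM

lm = TransformerLM(vocab_size=1000, embed_unit=128, att_unit=256, head=2, unit=1024, layer=2)

ids = torch.randint(low=1, high=1000, size=(2, 5))   # (batch, len) token ids; 0 is treated as padding
logits, _ = lm(ids, None)                            # hidden is ignored and returned as None
next_token_logp = logits.log_softmax(dim=-1)[:, -1]  # (batch, vocab_size) scores for the next token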