mirror of https://github.com/HumanAIGC/lite-avatar.git (synced 2026-02-05 09:59:18 +08:00)
add files
funasr_local/modules/data2vec/__init__.py (new file, 0 lines)

funasr_local/modules/data2vec/data_utils.py (new file, 147 lines)
@@ -0,0 +1,147 @@
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


from typing import Optional, Tuple

import numpy as np
import torch


def compute_mask_indices(
    shape: Tuple[int, int],
    padding_mask: Optional[torch.Tensor],
    mask_prob: float,
    mask_length: int,
    mask_type: str = "static",
    mask_other: float = 0.0,
    min_masks: int = 0,
    no_overlap: bool = False,
    min_space: int = 0,
    require_same_masks: bool = True,
    mask_dropout: float = 0.0,
) -> np.ndarray:
    """
    Computes random mask spans for a given shape

    Args:
        shape: the shape for which to compute masks.
            should be of size 2 where first element is batch size and 2nd is timesteps
        padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
        mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
            number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
            however due to overlaps, the actual number will be smaller (unless no_overlap is True)
        mask_type: how to compute mask lengths
            static = fixed size
            uniform = sample from uniform distribution [mask_other, mask_length*2]
            normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from poisson distribution with lambda = mask length
        min_masks: minimum number of masked spans
        no_overlap: if true, will switch to an alternative recursive algorithm that prevents spans from overlapping
        min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
        require_same_masks: if true, will randomly drop out masks until same amount of masks remains in each sample
        mask_dropout: randomly dropout this percentage of masks in each example
    """

    bsz, all_sz = shape
    mask = np.full((bsz, all_sz), False)

    all_num_mask = int(
        # add a random number for probabilistic rounding
        mask_prob * all_sz / float(mask_length)
        + np.random.rand()
    )

    all_num_mask = max(min_masks, all_num_mask)

    mask_idcs = []
    for i in range(bsz):
        if padding_mask is not None:
            sz = all_sz - padding_mask[i].long().sum().item()
            num_mask = int(
                # add a random number for probabilistic rounding
                mask_prob * sz / float(mask_length)
                + np.random.rand()
            )
            num_mask = max(min_masks, num_mask)
        else:
            sz = all_sz
            num_mask = all_num_mask

        if mask_type == "static":
            lengths = np.full(num_mask, mask_length)
        elif mask_type == "uniform":
            lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
        elif mask_type == "normal":
            lengths = np.random.normal(mask_length, mask_other, size=num_mask)
            lengths = [max(1, int(round(x))) for x in lengths]
        elif mask_type == "poisson":
            lengths = np.random.poisson(mask_length, size=num_mask)
            lengths = [int(round(x)) for x in lengths]
        else:
            raise Exception("unknown mask selection " + mask_type)

        if sum(lengths) == 0:
            lengths[0] = min(mask_length, sz - 1)

        if no_overlap:
            mask_idc = []

            def arrange(s, e, length, keep_length):
                span_start = np.random.randint(s, e - length)
                mask_idc.extend(span_start + i for i in range(length))

                new_parts = []
                if span_start - s - min_space >= keep_length:
                    new_parts.append((s, span_start - min_space + 1))
                if e - span_start - length - min_space > keep_length:
                    new_parts.append((span_start + length + min_space, e))
                return new_parts

            parts = [(0, sz)]
            min_length = min(lengths)
            for length in sorted(lengths, reverse=True):
                lens = np.fromiter(
                    (e - s if e - s >= length + min_space else 0 for s, e in parts),
                    int,  # np.int was removed in NumPy 1.24; the builtin int is equivalent here
                )
                l_sum = np.sum(lens)
                if l_sum == 0:
                    break
                probs = lens / np.sum(lens)
                c = np.random.choice(len(parts), p=probs)
                s, e = parts.pop(c)
                parts.extend(arrange(s, e, length, min_length))
            mask_idc = np.asarray(mask_idc)
        else:
            min_len = min(lengths)
            if sz - min_len <= num_mask:
                min_len = sz - num_mask - 1

            mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)

            mask_idc = np.asarray(
                [
                    mask_idc[j] + offset
                    for j in range(len(mask_idc))
                    for offset in range(lengths[j])
                ]
            )

        mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))

    min_len = min([len(m) for m in mask_idcs])
    for i, mask_idc in enumerate(mask_idcs):
        if len(mask_idc) > min_len and require_same_masks:
            mask_idc = np.random.choice(mask_idc, min_len, replace=False)
        if mask_dropout > 0:
            num_holes = np.rint(len(mask_idc) * mask_dropout).astype(int)
            mask_idc = np.random.choice(
                mask_idc, len(mask_idc) - num_holes, replace=False
            )

        mask[i, mask_idc] = True

    return mask
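Example usage of compute_mask_indices (not part of the commit; a minimal sketch that assumes this repository layout and illustrative shapes):

import torch

from funasr_local.modules.data2vec.data_utils import compute_mask_indices

B, T, C = 2, 100, 8
features = torch.randn(B, T, C)

# mask roughly 65% of the timesteps with fixed-length spans of 10 frames
mask = compute_mask_indices(
    shape=(B, T),
    padding_mask=None,
    mask_prob=0.65,
    mask_length=10,
    mask_type="static",
)
mask = torch.from_numpy(mask)   # (B, T) boolean mask
features[mask] = 0.0            # e.g. zero out the masked frames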

funasr_local/modules/data2vec/ema_module.py (new file, 132 lines)
@@ -0,0 +1,132 @@
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
Used for EMA tracking a given pytorch module. The user is responsible for calling step()
and setting the appropriate decay
"""

import copy
import logging

import torch


class EMAModule:
    """Exponential Moving Average of Fairseq Models"""

    def __init__(self, model, ema_decay=0.9999, ema_fp32=False, device=None, skip_keys=None):
        """
        @param model model to initialize the EMA with
        @param ema_decay decay rate of the exponential moving average
        @param ema_fp32 if True, keep a separate fp32 copy of the EMA params
        @param device If provided, copy EMA to this device (e.g. gpu).
            Otherwise EMA is in the same device as the model.
        @param skip_keys state dict keys that are copied from the model
            instead of being averaged
        """

        self.decay = ema_decay
        self.ema_fp32 = ema_fp32
        self.model = copy.deepcopy(model)
        self.model.requires_grad_(False)
        self.skip_keys = skip_keys or set()
        self.fp32_params = {}

        if device is not None:
            logging.info(f"Copying EMA model to device {device}")
            self.model = self.model.to(device=device)

        if self.ema_fp32:
            self.build_fp32_params()

        self.update_freq_counter = 0

    def build_fp32_params(self, state_dict=None):
        """
        Store a copy of the EMA params in fp32.
        If state dict is passed, the EMA params is copied from
        the provided state dict. Otherwise, it is copied from the
        current EMA model parameters.
        """
        if not self.ema_fp32:
            raise RuntimeError(
                "build_fp32_params should not be called if ema_fp32=False. "
                "Use ema_fp32=True if this is really intended."
            )

        if state_dict is None:
            state_dict = self.model.state_dict()

        def _to_float(t):
            return t.float() if torch.is_floating_point(t) else t

        for param_key in state_dict:
            if param_key in self.fp32_params:
                self.fp32_params[param_key].copy_(state_dict[param_key])
            else:
                self.fp32_params[param_key] = _to_float(state_dict[param_key])

    def restore(self, state_dict, build_fp32_params=False):
        """Load data from a model spec into EMA model"""
        self.model.load_state_dict(state_dict, strict=False)
        if build_fp32_params:
            self.build_fp32_params(state_dict)

    def set_decay(self, decay):
        self.decay = decay

    def get_decay(self):
        return self.decay

    def _step_internal(self, new_model):
        """One update of the EMA model based on new model weights"""
        decay = self.decay

        ema_state_dict = {}
        ema_params = (
            self.fp32_params if self.ema_fp32 else self.model.state_dict()
        )
        for key, param in new_model.state_dict().items():
            if isinstance(param, dict):
                continue
            try:
                ema_param = ema_params[key]
            except KeyError:
                ema_param = (
                    param.float().clone() if param.ndim == 1 else copy.deepcopy(param)
                )

            if param.shape != ema_param.shape:
                raise ValueError(
                    "incompatible tensor shapes between model param and ema param "
                    + "{} vs. {}".format(param.shape, ema_param.shape)
                )

            if "version" in key:
                # Do not decay a model.version pytorch param
                continue

            if key in self.skip_keys or ("num_batches_tracked" in key and ema_param.dtype == torch.int64):
                ema_param = param.to(dtype=ema_param.dtype).clone()
                ema_params[key].copy_(ema_param)
            else:
                ema_param.mul_(decay)
                ema_param.add_(param.to(dtype=ema_param.dtype), alpha=1 - decay)
            ema_state_dict[key] = ema_param
        self.restore(ema_state_dict, build_fp32_params=False)

    def step(self, new_model):
        self._step_internal(new_model)

    def reverse(self, model):
        """
        Load the model parameters from EMA model.
        Useful for inference or fine-tuning from the EMA model.
        """
        d = self.model.state_dict()
        if "_ema" in d:
            del d["_ema"]

        model.load_state_dict(d, strict=False)
        return model
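Example usage of EMAModule (not part of the commit; a minimal sketch with a toy linear model):

import torch
import torch.nn as nn

from funasr_local.modules.data2vec.ema_module import EMAModule

model = nn.Linear(16, 16)
ema = EMAModule(model, ema_decay=0.999, ema_fp32=True)

optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
for _ in range(10):
    loss = model(torch.randn(4, 16)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.step(model)                      # one EMA update per optimizer step

# copy the smoothed weights into a fresh model for evaluation
eval_model = ema.reverse(nn.Linear(16, 16))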

funasr_local/modules/data2vec/grad_multiply.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch


class GradMultiply(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, scale):
        ctx.scale = scale
        res = x.new(x)
        return res

    @staticmethod
    def backward(ctx, grad):
        return grad * ctx.scale, None
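Example usage of GradMultiply (not part of the commit): scale the gradient flowing into a sub-network while leaving the forward values unchanged.

import torch

from funasr_local.modules.data2vec.grad_multiply import GradMultiply

x = torch.randn(4, 8, requires_grad=True)
y = GradMultiply.apply(x, 0.1)   # forward pass returns the values of x unchanged
y.sum().backward()
print(x.grad)                    # every entry is 0.1 instead of 1.0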

funasr_local/modules/data2vec/multihead_attention.py (new file, 671 lines)
@@ -0,0 +1,671 @@
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import math
from typing import Dict, List, Optional, Tuple

import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter

from funasr_local.modules.data2vec.quant_noise import quant_noise


class FairseqDropout(nn.Module):
    def __init__(self, p, module_name=None):
        super().__init__()
        self.p = p
        self.module_name = module_name
        self.apply_during_inference = False

    def forward(self, x, inplace: bool = False):
        if self.p > 0 and (self.training or self.apply_during_inference):
            return F.dropout(x, p=self.p, training=True, inplace=inplace)
        else:
            return x

    def make_generation_fast_(
        self,
        name: str,
        retain_dropout: bool = False,
        retain_dropout_modules: Optional[List[str]] = None,
        **kwargs
    ):
        if retain_dropout:
            if retain_dropout_modules is not None and self.module_name is None:
                logging.warning(
                    "Cannot enable dropout during inference for module {} "
                    "because module_name was not set".format(name)
                )
            elif (
                retain_dropout_modules is None  # if None, apply to all modules
                or self.module_name in retain_dropout_modules
            ):
                logging.info(
                    "Enabling dropout during inference for module: {}".format(name)
                )
                self.apply_during_inference = True
            else:
                logging.info("Disabling dropout for module: {}".format(name))


class MultiheadAttention(nn.Module):
    """Multi-headed attention.

    See "Attention Is All You Need" for more details.
    """

    def __init__(
        self,
        embed_dim,
        num_heads,
        kdim=None,
        vdim=None,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        self_attention=False,
        encoder_decoder_attention=False,
        q_noise=0.0,
        qn_block_size=8,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads
        self.dropout_module = FairseqDropout(
            dropout, module_name=self.__class__.__name__
        )

        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5

        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention

        assert not self.self_attention or self.qkv_same_dim, (
            "Self-attention requires query, key and " "value to be of the same size"
        )

        self.k_proj = quant_noise(
            nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size
        )
        self.v_proj = quant_noise(
            nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
        )
        self.q_proj = quant_noise(
            nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
        )

        self.out_proj = quant_noise(
            nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
        )

        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None

        self.add_zero_attn = add_zero_attn

        self.reset_parameters()

        self.onnx_trace = False
        self.skip_embed_dim_check = False

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True

    def reset_parameters(self):
        if self.qkv_same_dim:
            # Empirically observed the convergence to be much better with
            # the scaled initialization
            nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
        else:
            nn.init.xavier_uniform_(self.k_proj.weight)
            nn.init.xavier_uniform_(self.v_proj.weight)
            nn.init.xavier_uniform_(self.q_proj.weight)

        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.out_proj.bias is not None:
            nn.init.constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)

    def _get_reserve_head_index(self, num_heads_to_keep: int):
        k_proj_heads_norm = []
        q_proj_heads_norm = []
        v_proj_heads_norm = []

        for i in range(self.num_heads):
            start_idx = i * self.head_dim
            end_idx = (i + 1) * self.head_dim
            k_proj_heads_norm.append(
                torch.sum(
                    torch.abs(
                        self.k_proj.weight[
                            start_idx:end_idx,
                        ]
                    )
                ).tolist()
                + torch.sum(torch.abs(self.k_proj.bias[start_idx:end_idx])).tolist()
            )
            q_proj_heads_norm.append(
                torch.sum(
                    torch.abs(
                        self.q_proj.weight[
                            start_idx:end_idx,
                        ]
                    )
                ).tolist()
                + torch.sum(torch.abs(self.q_proj.bias[start_idx:end_idx])).tolist()
            )
            v_proj_heads_norm.append(
                torch.sum(
                    torch.abs(
                        self.v_proj.weight[
                            start_idx:end_idx,
                        ]
                    )
                ).tolist()
                + torch.sum(torch.abs(self.v_proj.bias[start_idx:end_idx])).tolist()
            )

        heads_norm = []
        for i in range(self.num_heads):
            heads_norm.append(
                k_proj_heads_norm[i] + q_proj_heads_norm[i] + v_proj_heads_norm[i]
            )

        sorted_head_index = sorted(
            range(self.num_heads), key=lambda k: heads_norm[k], reverse=True
        )
        reserve_head_index = []
        for i in range(num_heads_to_keep):
            start = sorted_head_index[i] * self.head_dim
            end = (sorted_head_index[i] + 1) * self.head_dim
            reserve_head_index.append((start, end))
        return reserve_head_index

    def _adaptive_prune_heads(self, reserve_head_index: List[Tuple[int, int]]):
        new_q_weight = []
        new_q_bias = []
        new_k_weight = []
        new_k_bias = []
        new_v_weight = []
        new_v_bias = []
        new_out_proj_weight = []

        for ele in reserve_head_index:
            start_idx, end_idx = ele
            new_q_weight.append(
                self.q_proj.weight[
                    start_idx:end_idx,
                ]
            )
            new_q_bias.append(self.q_proj.bias[start_idx:end_idx])

            new_k_weight.append(
                self.k_proj.weight[
                    start_idx:end_idx,
                ]
            )

            new_k_bias.append(self.k_proj.bias[start_idx:end_idx])

            new_v_weight.append(
                self.v_proj.weight[
                    start_idx:end_idx,
                ]
            )
            new_v_bias.append(self.v_proj.bias[start_idx:end_idx])

            new_out_proj_weight.append(self.out_proj.weight[:, start_idx:end_idx])

        new_q_weight = torch.cat(new_q_weight).detach()
        new_k_weight = torch.cat(new_k_weight).detach()
        new_v_weight = torch.cat(new_v_weight).detach()
        new_out_proj_weight = torch.cat(new_out_proj_weight, dim=-1).detach()
        new_q_weight.requires_grad = True
        new_k_weight.requires_grad = True
        new_v_weight.requires_grad = True
        new_out_proj_weight.requires_grad = True

        new_q_bias = torch.cat(new_q_bias).detach()
        new_q_bias.requires_grad = True

        new_k_bias = torch.cat(new_k_bias).detach()
        new_k_bias.requires_grad = True

        new_v_bias = torch.cat(new_v_bias).detach()
        new_v_bias.requires_grad = True

        self.q_proj.weight = torch.nn.Parameter(new_q_weight)
        self.q_proj.bias = torch.nn.Parameter(new_q_bias)

        self.k_proj.weight = torch.nn.Parameter(new_k_weight)
        self.k_proj.bias = torch.nn.Parameter(new_k_bias)

        self.v_proj.weight = torch.nn.Parameter(new_v_weight)
        self.v_proj.bias = torch.nn.Parameter(new_v_bias)

        self.out_proj.weight = torch.nn.Parameter(new_out_proj_weight)

        self.num_heads = len(reserve_head_index)
        self.embed_dim = self.head_dim * self.num_heads
        self.q_proj.out_features = self.embed_dim
        self.k_proj.out_features = self.embed_dim
        self.v_proj.out_features = self.embed_dim

    def _set_skip_embed_dim_check(self):
        self.skip_embed_dim_check = True

    def forward(
        self,
        query,
        key: Optional[Tensor],
        value: Optional[Tensor],
        key_padding_mask: Optional[Tensor] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        need_weights: bool = True,
        static_kv: bool = False,
        attn_mask: Optional[Tensor] = None,
        before_softmax: bool = False,
        need_head_weights: bool = False,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Input shape: Time x Batch x Channel

        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: False).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
        """
        if need_head_weights:
            need_weights = True

        is_tpu = query.device.type == "xla"

        tgt_len, bsz, embed_dim = query.size()
        src_len = tgt_len
        if not self.skip_embed_dim_check:
            assert (
                embed_dim == self.embed_dim
            ), f"query dim {embed_dim} != {self.embed_dim}"
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        if key is not None:
            src_len, key_bsz, _ = key.size()
            if not torch.jit.is_scripting():
                assert key_bsz == bsz
                assert value is not None
                assert src_len, bsz == value.shape[:2]

        if (
            not self.onnx_trace
            and not is_tpu  # don't use PyTorch version on TPUs
            and incremental_state is None
            and not static_kv
            # A workaround for quantization to work. Otherwise JIT compilation
            # treats bias in linear module as method.
            and not torch.jit.is_scripting()
            # The Multihead attention implemented in pytorch forces strong dimension check
            # for input embedding dimension and K,Q,V projection dimension.
            # Since pruning will break the dimension check and it is not easy to modify the pytorch API,
            # it is preferred to bypass the pytorch MHA when we need to skip embed_dim_check
            and not self.skip_embed_dim_check
        ):
            assert key is not None and value is not None
            return F.multi_head_attention_forward(
                query,
                key,
                value,
                self.embed_dim,
                self.num_heads,
                torch.empty([0]),
                torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
                self.bias_k,
                self.bias_v,
                self.add_zero_attn,
                self.dropout_module.p,
                self.out_proj.weight,
                self.out_proj.bias,
                self.training or self.dropout_module.apply_during_inference,
                key_padding_mask,
                need_weights,
                attn_mask,
                use_separate_proj_weight=True,
                q_proj_weight=self.q_proj.weight,
                k_proj_weight=self.k_proj.weight,
                v_proj_weight=self.v_proj.weight,
            )

        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if saved_state is not None and "prev_key" in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None

        if self.self_attention:
            q = self.q_proj(query)
            k = self.k_proj(query)
            v = self.v_proj(query)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.q_proj(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.k_proj(key)
                v = self.v_proj(key)

        else:
            assert key is not None and value is not None
            q = self.q_proj(query)
            k = self.k_proj(key)
            v = self.v_proj(value)
        q *= self.scaling

        if self.bias_k is not None:
            assert self.bias_v is not None
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
                    ],
                    dim=1,
                )

        q = (
            q.contiguous()
            .view(tgt_len, bsz * self.num_heads, self.head_dim)
            .transpose(0, 1)
        )
        if k is not None:
            k = (
                k.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
        if v is not None:
            v = (
                v.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )

        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            if "prev_key" in saved_state:
                _prev_key = saved_state["prev_key"]
                assert _prev_key is not None
                prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    assert k is not None
                    k = torch.cat([prev_key, k], dim=1)
                src_len = k.size(1)
            if "prev_value" in saved_state:
                _prev_value = saved_state["prev_value"]
                assert _prev_value is not None
                prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    assert v is not None
                    v = torch.cat([prev_value, v], dim=1)
            prev_key_padding_mask: Optional[Tensor] = None
            if "prev_key_padding_mask" in saved_state:
                prev_key_padding_mask = saved_state["prev_key_padding_mask"]
            assert k is not None and v is not None
            key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
                key_padding_mask=key_padding_mask,
                prev_key_padding_mask=prev_key_padding_mask,
                batch_size=bsz,
                src_len=k.size(1),
                static_kv=static_kv,
            )

            saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_key_padding_mask"] = key_padding_mask
            # In this branch incremental_state is never None
            assert incremental_state is not None
            incremental_state = self._set_input_buffer(incremental_state, saved_state)
        assert k is not None
        assert k.size(1) == src_len

        # This is part of a workaround to get around fork/join parallelism
        # not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.dim() == 0:
            key_padding_mask = None

        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len

        if self.add_zero_attn:
            assert v is not None
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        torch.zeros(key_padding_mask.size(0), 1).type_as(
                            key_padding_mask
                        ),
                    ],
                    dim=1,
                )

        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)

        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]

        if attn_mask is not None:
            attn_mask = attn_mask.unsqueeze(0)
            if self.onnx_trace:
                attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
            attn_weights += attn_mask

        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            if not is_tpu:
                attn_weights = attn_weights.masked_fill(
                    key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
                    float("-inf"),
                )
            else:
                attn_weights = attn_weights.transpose(0, 2)
                attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
                attn_weights = attn_weights.transpose(0, 2)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if before_softmax:
            return attn_weights, v

        attn_weights_float = F.softmax(attn_weights, dim=-1, dtype=torch.float32)
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = self.dropout_module(attn_weights)

        assert v is not None
        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        if self.onnx_trace and attn.size(1) == 1:
            # when ONNX tracing a single decoder step (sequence length == 1)
            # the transpose is a no-op copy before view, thus unnecessary
            attn = attn.contiguous().view(tgt_len, bsz, self.embed_dim)
        else:
            attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim)
        attn = self.out_proj(attn)
        attn_weights: Optional[Tensor] = None
        if need_weights:
            attn_weights = attn_weights_float.view(
                bsz, self.num_heads, tgt_len, src_len
            ).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                attn_weights = attn_weights.mean(dim=0)

        return attn, attn_weights

    @staticmethod
    def _append_prev_key_padding_mask(
        key_padding_mask: Optional[Tensor],
        prev_key_padding_mask: Optional[Tensor],
        batch_size: int,
        src_len: int,
        static_kv: bool,
    ) -> Optional[Tensor]:
        # saved key padding masks have shape (bsz, seq_len)
        if prev_key_padding_mask is not None and static_kv:
            new_key_padding_mask = prev_key_padding_mask
        elif prev_key_padding_mask is not None and key_padding_mask is not None:
            new_key_padding_mask = torch.cat(
                [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
            )
        # During incremental decoding, as the padding token enters and
        # leaves the frame, there will be a time when prev or current
        # is None
        elif prev_key_padding_mask is not None:
            if src_len > prev_key_padding_mask.size(1):
                filler = torch.zeros(
                    (batch_size, src_len - prev_key_padding_mask.size(1)),
                    device=prev_key_padding_mask.device,
                )
                new_key_padding_mask = torch.cat(
                    [prev_key_padding_mask.float(), filler.float()], dim=1
                )
            else:
                new_key_padding_mask = prev_key_padding_mask.float()
        elif key_padding_mask is not None:
            if src_len > key_padding_mask.size(1):
                filler = torch.zeros(
                    (batch_size, src_len - key_padding_mask.size(1)),
                    device=key_padding_mask.device,
                )
                new_key_padding_mask = torch.cat(
                    [filler.float(), key_padding_mask.float()], dim=1
                )
            else:
                new_key_padding_mask = key_padding_mask.float()
        else:
            new_key_padding_mask = prev_key_padding_mask
        return new_key_padding_mask

    @torch.jit.export
    def reorder_incremental_state(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        new_order: Tensor,
    ):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                input_buffer_k = input_buffer[k]
                if input_buffer_k is not None:
                    if self.encoder_decoder_attention and input_buffer_k.size(
                        0
                    ) == new_order.size(0):
                        break
                    input_buffer[k] = input_buffer_k.index_select(0, new_order)
            incremental_state = self._set_input_buffer(incremental_state, input_buffer)
        return incremental_state

    def _get_input_buffer(
        self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
    ) -> Dict[str, Optional[Tensor]]:
        result = self.get_incremental_state(incremental_state, "attn_state")
        if result is not None:
            return result
        else:
            empty_result: Dict[str, Optional[Tensor]] = {}
            return empty_result

    def _set_input_buffer(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        buffer: Dict[str, Optional[Tensor]],
    ):
        return self.set_incremental_state(incremental_state, "attn_state", buffer)

    def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
        return attn_weights

    def upgrade_state_dict_named(self, state_dict, name):
        prefix = name + "." if name != "" else ""
        items_to_add = {}
        keys_to_remove = []
        for k in state_dict.keys():
            if k.endswith(prefix + "in_proj_weight"):
                # in_proj_weight used to be q + k + v with same dimensions
                dim = int(state_dict[k].shape[0] / 3)
                items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
                items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim: 2 * dim]
                items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim:]

                keys_to_remove.append(k)

                k_bias = prefix + "in_proj_bias"
                if k_bias in state_dict.keys():
                    dim = int(state_dict[k].shape[0] / 3)
                    items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
                    items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
                        dim: 2 * dim
                    ]
                    items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim:]

                    keys_to_remove.append(prefix + "in_proj_bias")

        for k in keys_to_remove:
            del state_dict[k]

        for key, value in items_to_add.items():
            state_dict[key] = value
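Example usage of MultiheadAttention as plain self-attention (not part of the commit; a minimal sketch with illustrative sizes). Inputs follow the (time, batch, channel) convention documented in forward().

import torch

from funasr_local.modules.data2vec.multihead_attention import MultiheadAttention

mha = MultiheadAttention(embed_dim=64, num_heads=4, dropout=0.1, self_attention=True)
x = torch.randn(20, 3, 64)                 # (T, B, C)

attn_out, attn_weights = mha(query=x, key=x, value=x)
print(attn_out.shape)                      # torch.Size([20, 3, 64])
# attn_weights holds the attention map averaged over heads,
# or None when need_weights=False is passed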

funasr_local/modules/data2vec/quant_noise.py (new file, 107 lines)
@@ -0,0 +1,107 @@
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn


def quant_noise(module, p, block_size):
    """
    Wraps modules and applies quantization noise to the weights for
    subsequent quantization with Iterative Product Quantization as
    described in "Training with Quantization Noise for Extreme Model Compression"

    Args:
        - module: nn.Module
        - p: amount of Quantization Noise
        - block_size: size of the blocks for subsequent quantization with iPQ

    Remarks:
        - Module weights must have the right sizes wrt the block size
        - Only Linear, Embedding and Conv2d modules are supported for the moment
        - For more detail on how to quantize by blocks with convolutional weights,
          see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks"
        - We implement the simplest form of noise here as stated in the paper
          which consists in randomly dropping blocks
    """

    # if no quantization noise, don't register hook
    if p <= 0:
        return module

    # supported modules
    assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))

    # test whether module.weight has the right sizes wrt block_size
    is_conv = module.weight.ndim == 4

    # 2D matrix
    if not is_conv:
        assert (
            module.weight.size(1) % block_size == 0
        ), "Input features must be a multiple of block sizes"

    # 4D matrix
    else:
        # 1x1 convolutions
        if module.kernel_size == (1, 1):
            assert (
                module.in_channels % block_size == 0
            ), "Input channels must be a multiple of block sizes"
        # regular convolutions
        else:
            k = module.kernel_size[0] * module.kernel_size[1]
            assert k % block_size == 0, "Kernel size must be a multiple of block size"

    def _forward_pre_hook(mod, input):
        # no noise for evaluation
        if mod.training:
            if not is_conv:
                # gather weight and sizes
                weight = mod.weight
                in_features = weight.size(1)
                out_features = weight.size(0)

                # split weight matrix into blocks and randomly drop selected blocks
                mask = torch.zeros(
                    in_features // block_size * out_features, device=weight.device
                )
                mask.bernoulli_(p)
                mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)

            else:
                # gather weight and sizes
                weight = mod.weight
                in_channels = mod.in_channels
                out_channels = mod.out_channels

                # split weight matrix into blocks and randomly drop selected blocks
                if mod.kernel_size == (1, 1):
                    mask = torch.zeros(
                        int(in_channels // block_size * out_channels),
                        device=weight.device,
                    )
                    mask.bernoulli_(p)
                    mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
                else:
                    mask = torch.zeros(
                        weight.size(0), weight.size(1), device=weight.device
                    )
                    mask.bernoulli_(p)
                    mask = (
                        mask.unsqueeze(2)
                        .unsqueeze(3)
                        .repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
                    )

            # scale weights and apply mask
            mask = mask.to(
                torch.bool
            )  # x.bool() is not currently supported in TorchScript
            s = 1 / (1 - p)
            mod.weight.data = s * weight.masked_fill(mask, 0)

    module.register_forward_pre_hook(_forward_pre_hook)
    return module
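Example usage of quant_noise (not part of the commit): wrap a linear layer so that, during training, random weight blocks are dropped and the rest rescaled, as preparation for iterative product quantization.

import torch
import torch.nn as nn

from funasr_local.modules.data2vec.quant_noise import quant_noise

layer = quant_noise(nn.Linear(64, 64), p=0.1, block_size=8)

layer.train()
y_train = layer(torch.randn(2, 64))   # forward pre-hook zeroes ~10% of the 8-wide blocks

layer.eval()
y_eval = layer(torch.randn(2, 64))    # the hook is a no-op outside training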

funasr_local/modules/data2vec/utils.py (new file, 156 lines)
@@ -0,0 +1,156 @@
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from funasr_local.modules.data2vec.multihead_attention import MultiheadAttention


class Fp32LayerNorm(nn.LayerNorm):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, input):
        output = F.layer_norm(
            input.float(),
            self.normalized_shape,
            self.weight.float() if self.weight is not None else None,
            self.bias.float() if self.bias is not None else None,
            self.eps,
        )
        return output.type_as(input)


class Fp32GroupNorm(nn.GroupNorm):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, input):
        output = F.group_norm(
            input.float(),
            self.num_groups,
            self.weight.float() if self.weight is not None else None,
            self.bias.float() if self.bias is not None else None,
            self.eps,
        )
        return output.type_as(input)


class TransposeLast(nn.Module):
    def __init__(self, deconstruct_idx=None):
        super().__init__()
        self.deconstruct_idx = deconstruct_idx

    def forward(self, x):
        if self.deconstruct_idx is not None:
            x = x[self.deconstruct_idx]
        return x.transpose(-2, -1)


class SamePad(nn.Module):
    def __init__(self, kernel_size, causal=False):
        super().__init__()
        if causal:
            self.remove = kernel_size - 1
        else:
            self.remove = 1 if kernel_size % 2 == 0 else 0

    def forward(self, x):
        if self.remove > 0:
            x = x[:, :, : -self.remove]
        return x


def pad_to_multiple(x, multiple, dim=-1, value=0):
    # Inspired from https://github.com/lucidrains/local-attention/blob/master/local_attention/local_attention.py#L41
    if x is None:
        return None, 0
    tsz = x.size(dim)
    m = tsz / multiple
    remainder = math.ceil(m) * multiple - tsz
    if m.is_integer():
        return x, 0
    pad_offset = (0,) * (-1 - dim) * 2

    return F.pad(x, (*pad_offset, 0, remainder), value=value), remainder


def gelu_accurate(x):
    if not hasattr(gelu_accurate, "_a"):
        gelu_accurate._a = math.sqrt(2 / math.pi)
    return (
        0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
    )


def gelu(x: torch.Tensor) -> torch.Tensor:
    return torch.nn.functional.gelu(x.float()).type_as(x)


def get_available_activation_fns():
    return [
        "relu",
        "gelu",
        "gelu_fast",  # deprecated
        "gelu_accurate",
        "tanh",
        "linear",
    ]


def get_activation_fn(activation: str):
    """Returns the activation function corresponding to `activation`"""

    if activation == "relu":
        return F.relu
    elif activation == "gelu":
        return gelu
    elif activation == "gelu_accurate":
        return gelu_accurate
    elif activation == "tanh":
        return torch.tanh
    elif activation == "linear":
        return lambda x: x
    elif activation == "swish":
        return torch.nn.SiLU
    else:
        raise RuntimeError("--activation-fn {} not supported".format(activation))


def init_bert_params(module):
    """
    Initialize the weights specific to the BERT Model.
    This overrides the default initializations depending on the specified arguments.
    1. If normal_init_linear_weights is set then weights of linear
       layer will be initialized using the normal distribution and
       bias will be set to the specified value.
    2. If normal_init_embed_weights is set then weights of embedding
       layer will be initialized using the normal distribution.
    3. If normal_init_proj_weights is set then weights of
       in_project_weight for MultiHeadAttention initialized using
       the normal distribution (to be validated).
    """

    def normal_(data):
        # with FSDP, module params will be on CUDA, so we cast them back to CPU
        # so that the RNG is consistent with and without FSDP
        data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))

    if isinstance(module, nn.Linear):
        normal_(module.weight.data)
        if module.bias is not None:
            module.bias.data.zero_()
    if isinstance(module, nn.Embedding):
        normal_(module.weight.data)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    if isinstance(module, MultiheadAttention):
        normal_(module.q_proj.weight.data)
        normal_(module.k_proj.weight.data)
        normal_(module.v_proj.weight.data)
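Example usage of the helpers above (not part of the commit; a minimal sketch): pad the time dimension of a feature tensor to a multiple of 4, apply an activation resolved by name, then undo the padding.

import torch

from funasr_local.modules.data2vec.utils import get_activation_fn, pad_to_multiple

x = torch.randn(2, 10, 8)                        # (B, T, C) with T = 10
padded, pad_len = pad_to_multiple(x, 4, dim=-2)  # T becomes 12, pad_len == 2

act = get_activation_fn("gelu")
y = act(padded)
if pad_len > 0:
    y = y[:, :-pad_len]                          # drop the padded frames again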

funasr_local/modules/data2vec/wav2vec2.py (new file, 407 lines)
@@ -0,0 +1,407 @@
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import math
from typing import List, Tuple

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from funasr_local.modules.data2vec import utils
from funasr_local.modules.data2vec.multihead_attention import MultiheadAttention


class ConvFeatureExtractionModel(nn.Module):
    def __init__(
        self,
        conv_layers: List[Tuple[int, int, int]],
        dropout: float = 0.0,
        mode: str = "default",
        conv_bias: bool = False,
        in_d: int = 1
    ):
        super().__init__()

        assert mode in {"default", "layer_norm"}

        def block(
            n_in,
            n_out,
            k,
            stride,
            is_layer_norm=False,
            is_group_norm=False,
            conv_bias=False,
        ):
            def make_conv():
                conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
                nn.init.kaiming_normal_(conv.weight)
                return conv

            assert (
                is_layer_norm and is_group_norm
            ) == False, "layer norm and group norm are exclusive"

            if is_layer_norm:
                return nn.Sequential(
                    make_conv(),
                    nn.Dropout(p=dropout),
                    nn.Sequential(
                        utils.TransposeLast(),
                        utils.Fp32LayerNorm(dim, elementwise_affine=True),
                        utils.TransposeLast(),
                    ),
                    nn.GELU(),
                )
            elif is_group_norm:
                return nn.Sequential(
                    make_conv(),
                    nn.Dropout(p=dropout),
                    utils.Fp32GroupNorm(dim, dim, affine=True),
                    nn.GELU(),
                )
            else:
                return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())

        self.conv_layers = nn.ModuleList()
        for i, cl in enumerate(conv_layers):
            assert len(cl) == 3, "invalid conv definition: " + str(cl)
            (dim, k, stride) = cl

            self.conv_layers.append(
                block(
                    in_d,
                    dim,
                    k,
                    stride,
                    is_layer_norm=mode == "layer_norm",
                    is_group_norm=mode == "default" and i == 0,
                    conv_bias=conv_bias,
                )
            )
            in_d = dim

    def forward(self, x):
        if len(x.shape) == 2:
            x = x.unsqueeze(1)
        else:
            x = x.transpose(1, 2)

        for conv in self.conv_layers:
            x = conv(x)
        return x


def make_conv_pos(e, k, g):
    pos_conv = nn.Conv1d(
        e,
        e,
        kernel_size=k,
        padding=k // 2,
        groups=g,
    )
    dropout = 0
    std = math.sqrt((4 * (1.0 - dropout)) / (k * e))
    nn.init.normal_(pos_conv.weight, mean=0, std=std)
    nn.init.constant_(pos_conv.bias, 0)

    pos_conv = nn.utils.weight_norm(pos_conv, name="weight", dim=2)
    pos_conv = nn.Sequential(pos_conv, utils.SamePad(k), nn.GELU())

    return pos_conv


class TransformerEncoder(nn.Module):
    def build_encoder_layer(self):
        if self.layer_type == "transformer":
            layer = TransformerSentenceEncoderLayer(
                embedding_dim=self.embedding_dim,
                ffn_embedding_dim=self.encoder_ffn_embed_dim,
                num_attention_heads=self.encoder_attention_heads,
                dropout=self.dropout,
                attention_dropout=self.attention_dropout,
                activation_dropout=self.activation_dropout,
                activation_fn=self.activation_fn,
                layer_norm_first=self.layer_norm_first,
            )
        else:
            logging.error("Only transformer is supported for data2vec now")
        return layer

    def __init__(
        self,
        # position
        dropout,
        encoder_embed_dim,
        required_seq_len_multiple,
        pos_conv_depth,
        conv_pos,
        conv_pos_groups,
        # transformer layers
        layer_type,
        encoder_layers,
        encoder_ffn_embed_dim,
        encoder_attention_heads,
        attention_dropout,
        activation_dropout,
        activation_fn,
        layer_norm_first,
        encoder_layerdrop,
        max_positions,
    ):
        super().__init__()

        # position
        self.dropout = dropout
        self.embedding_dim = encoder_embed_dim
        self.required_seq_len_multiple = required_seq_len_multiple
        if pos_conv_depth > 1:
            num_layers = pos_conv_depth
            k = max(3, conv_pos // num_layers)

            def make_conv_block(e, k, g, l):
                return nn.Sequential(
                    *[
                        nn.Sequential(
                            nn.Conv1d(
                                e,
                                e,
                                kernel_size=k,
                                padding=k // 2,
                                groups=g,
                            ),
                            utils.SamePad(k),
                            utils.TransposeLast(),
                            torch.nn.LayerNorm(e, elementwise_affine=False),
                            utils.TransposeLast(),
                            nn.GELU(),
                        )
                        for _ in range(l)
                    ]
                )

            self.pos_conv = make_conv_block(
                self.embedding_dim, k, conv_pos_groups, num_layers
            )

        else:
            self.pos_conv = make_conv_pos(
                self.embedding_dim,
                conv_pos,
                conv_pos_groups,
            )

        # transformer layers
        self.layer_type = layer_type
        self.encoder_ffn_embed_dim = encoder_ffn_embed_dim
        self.encoder_attention_heads = encoder_attention_heads
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_fn = activation_fn
        self.layer_norm_first = layer_norm_first
        self.layerdrop = encoder_layerdrop
        self.max_positions = max_positions
        self.layers = nn.ModuleList(
            [self.build_encoder_layer() for _ in range(encoder_layers)]
        )
        self.layer_norm = torch.nn.LayerNorm(self.embedding_dim)

        self.apply(utils.init_bert_params)

    def forward(self, x, padding_mask=None, layer=None):
        x, layer_results = self.extract_features(x, padding_mask, layer)

        if self.layer_norm_first and layer is None:
            x = self.layer_norm(x)

        return x, layer_results

    def extract_features(
        self,
        x,
        padding_mask=None,
        tgt_layer=None,
        min_layer=0,
    ):

        if padding_mask is not None:
            x[padding_mask] = 0

        x_conv = self.pos_conv(x.transpose(1, 2))
        x_conv = x_conv.transpose(1, 2)
        x = x + x_conv

        if not self.layer_norm_first:
            x = self.layer_norm(x)

        # pad to the sequence length dimension
        x, pad_length = utils.pad_to_multiple(
            x, self.required_seq_len_multiple, dim=-2, value=0
        )
        if pad_length > 0 and padding_mask is None:
            padding_mask = x.new_zeros((x.size(0), x.size(1)), dtype=torch.bool)
            padding_mask[:, -pad_length:] = True
        else:
            padding_mask, _ = utils.pad_to_multiple(
                padding_mask, self.required_seq_len_multiple, dim=-1, value=True
            )
        x = F.dropout(x, p=self.dropout, training=self.training)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        layer_results = []
        r = None
        for i, layer in enumerate(self.layers):
            dropout_probability = np.random.random() if self.layerdrop > 0 else 1
            if not self.training or (dropout_probability > self.layerdrop):
                x, (z, lr) = layer(x, self_attn_padding_mask=padding_mask)
                if i >= min_layer:
                    layer_results.append((x, z, lr))
            if i == tgt_layer:
                r = x
                break

        if r is not None:
            x = r

        # T x B x C -> B x T x C
        x = x.transpose(0, 1)

        # undo padding
        if pad_length > 0:
            x = x[:, :-pad_length]

            def undo_pad(a, b, c):
                return (
                    a[:-pad_length],
                    b[:-pad_length] if b is not None else b,
                    c[:-pad_length],
                )

            layer_results = [undo_pad(*u) for u in layer_results]

        return x, layer_results

    def max_positions(self):
        """Maximum output length supported by the encoder."""
        return self.max_positions

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        return state_dict


class TransformerSentenceEncoderLayer(nn.Module):
    """
    Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
    models.
    """

    def __init__(
        self,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
        dropout: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        activation_fn: str = "relu",
        layer_norm_first: bool = False,
    ) -> None:

        super().__init__()
        # Initialize parameters
        self.embedding_dim = embedding_dim
        self.dropout = dropout
        self.activation_dropout = activation_dropout

        # Initialize blocks
        self.activation_fn = utils.get_activation_fn(activation_fn)
        self.self_attn = MultiheadAttention(
            self.embedding_dim,
            num_attention_heads,
            dropout=attention_dropout,
            self_attention=True,
        )

        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(self.activation_dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.layer_norm_first = layer_norm_first

        # layer norm associated with the self attention layer
        self.self_attn_layer_norm = torch.nn.LayerNorm(self.embedding_dim)
        self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
        self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)

        # layer norm associated with the position wise feed-forward NN
        self.final_layer_norm = torch.nn.LayerNorm(self.embedding_dim)

    def forward(
        self,
        x: torch.Tensor,  # (T, B, C)
        self_attn_mask: torch.Tensor = None,
        self_attn_padding_mask: torch.Tensor = None,
    ):
        """
        LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer implementation.
        """
        residual = x

        if self.layer_norm_first:
            x = self.self_attn_layer_norm(x)
            x, attn = self.self_attn(
                query=x,
                key=x,
                value=x,
                key_padding_mask=self_attn_padding_mask,
                attn_mask=self_attn_mask,
                need_weights=False,
            )
            x = self.dropout1(x)
            x = residual + x

            residual = x
            x = self.final_layer_norm(x)
            x = self.activation_fn(self.fc1(x))
            x = self.dropout2(x)
            x = self.fc2(x)

            layer_result = x

            x = self.dropout3(x)
            x = residual + x
        else:
            x, attn = self.self_attn(
                query=x,
                key=x,
                value=x,
                key_padding_mask=self_attn_padding_mask,
                need_weights=False,
            )

            x = self.dropout1(x)
            x = residual + x

            x = self.self_attn_layer_norm(x)

            residual = x
            x = self.activation_fn(self.fc1(x))
            x = self.dropout2(x)
            x = self.fc2(x)

            layer_result = x

            x = self.dropout3(x)
            x = residual + x
            x = self.final_layer_norm(x)

        return x, (attn, layer_result)
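Example wiring of the modules above (not part of the commit; a minimal sketch with illustrative, non-data2vec hyperparameters): a two-block convolutional feature extractor followed by a two-layer transformer encoder.

import torch

from funasr_local.modules.data2vec.wav2vec2 import (
    ConvFeatureExtractionModel,
    TransformerEncoder,
)

feature_extractor = ConvFeatureExtractionModel(
    conv_layers=[(32, 10, 5), (32, 3, 2)],  # (dim, kernel, stride) per block
    mode="default",
)

encoder = TransformerEncoder(
    dropout=0.1,
    encoder_embed_dim=32,
    required_seq_len_multiple=2,
    pos_conv_depth=1,
    conv_pos=17,
    conv_pos_groups=4,
    layer_type="transformer",
    encoder_layers=2,
    encoder_ffn_embed_dim=64,
    encoder_attention_heads=4,
    attention_dropout=0.1,
    activation_dropout=0.0,
    activation_fn="gelu",
    layer_norm_first=False,
    encoder_layerdrop=0.0,
    max_positions=1000,
)

wav = torch.randn(2, 1600)                      # (B, samples)
feats = feature_extractor(wav).transpose(1, 2)  # (B, T, 32)
out, layer_results = encoder(feats)
print(out.shape)                                # (B, T, 32)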