Mirror of https://github.com/HumanAIGC/lite-avatar.git (synced 2026-02-05 18:09:20 +08:00)

Commit: add files
funasr_local/datasets/large_datasets/utils/clipping.py (new file, 40 lines)
@@ -0,0 +1,40 @@
import numpy as np
import torch

from funasr_local.datasets.collate_fn import crop_to_max_size


def clipping(data):
    assert isinstance(data, list)
    assert "key" in data[0]

    keys = [x["key"] for x in data]

    batch = {}
    data_names = data[0].keys()
    for data_name in data_names:
        if data_name == "key":
            continue
        else:
            # Integer arrays (e.g. token ids) become int64 tensors, everything else float32.
            if data[0][data_name].dtype.kind == "i":
                tensor_type = torch.int64
            else:
                tensor_type = torch.float32

            tensor_list = [torch.tensor(np.copy(d[data_name]), dtype=tensor_type) for d in data]
            tensor_lengths = torch.tensor([len(d[data_name]) for d in data], dtype=torch.int32)

            # Clip every sample in the batch to the length of the shortest one.
            length_clip = min(tensor_lengths)
            tensor_clip = tensor_list[0].new_zeros(len(tensor_list), length_clip, tensor_list[0].shape[1])
            for i, (tensor, length) in enumerate(zip(tensor_list, tensor_lengths)):
                diff = length - length_clip
                assert diff >= 0
                if diff == 0:
                    tensor_clip[i] = tensor
                else:
                    tensor_clip[i] = crop_to_max_size(tensor, length_clip)

            batch[data_name] = tensor_clip
            batch[data_name + "_lengths"] = torch.tensor([tensor.shape[0] for tensor in tensor_clip], dtype=torch.long)

    return keys, batch
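For reference, a minimal usage sketch (not part of the commit; the sample keys and feature shapes below are illustrative, and it assumes the funasr_local package is importable):

import numpy as np
from funasr_local.datasets.large_datasets.utils.clipping import clipping

# Two 2-D feature matrices (T x D) of different lengths.
samples = [
    {"key": "utt1", "speech": np.random.randn(120, 80).astype(np.float32)},
    {"key": "utt2", "speech": np.random.randn(95, 80).astype(np.float32)},
]
keys, batch = clipping(samples)
# keys == ["utt1", "utt2"]
# batch["speech"].shape == (2, 95, 80): both utterances are clipped to the shorter length.
# batch["speech_lengths"] is a LongTensor of [95, 95].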
funasr_local/datasets/large_datasets/utils/filter.py (new file, 26 lines)
@@ -0,0 +1,26 @@
#!/usr/bin/env python


def filter(data,
           speech_length_min=100,
           speech_length_max=15000,
           token_length_min=0,
           token_length_max=200):
    assert "speech" in data or "text" in data

    if "speech" in data and "text" in data:
        if "sampling_rate" in data:
            # Raw waveform: convert the number of samples to a duration in milliseconds.
            speech_length = (data["speech"].shape[0] / data["sampling_rate"]) * 1000.
        else:
            # Pre-extracted features: use the number of frames directly.
            speech_length = data["speech"].shape[0]
        num_tokens = len(data['text'])
        return speech_length_min < speech_length < speech_length_max and token_length_min < num_tokens < token_length_max
    elif "speech" in data:
        if "sampling_rate" in data:
            speech_length = (data["speech"].shape[0] / data["sampling_rate"]) * 1000.
        else:
            speech_length = data["speech"].shape[0]
        return speech_length_min < speech_length < speech_length_max
    else:
        num_tokens = len(data['text'])
        return token_length_min < num_tokens < token_length_max
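A short illustration of the length thresholds (values chosen only for the example; note the function shadows Python's built-in filter):

import numpy as np
from funasr_local.datasets.large_datasets.utils.filter import filter

sample = {
    "speech": np.zeros(16000, dtype=np.float32),  # 1 s of audio at 16 kHz
    "sampling_rate": 16000,
    "text": [1, 2, 3],
}
# 16000 samples / 16000 Hz * 1000 = 1000 ms, inside (100, 15000) ms,
# and 3 tokens is inside (0, 200), so the sample passes the filter.
assert filter(sample)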
funasr_local/datasets/large_datasets/utils/low_frame_rate.py (new file, 30 lines)
@@ -0,0 +1,30 @@
import numpy as np


def build_LFR_features(data, m, n):
    """
    Actually, this implements stacking frames and skipping frames.
    If m = 1 and n = 1, just return the original features.
    If m = 1 and n > 1, it works like skipping.
    If m > 1 and n = 1, it works like stacking, but only right (future) frames are used.
    If m > 1 and n > 1, it works like LFR.

    Args:
        data: T x D np.ndarray of input frames
        m: number of frames to stack
        n: number of frames to skip (the hop between stacked windows)
    """
    LFR_inputs = []
    T = data.shape[0]
    T_lfr = int(np.ceil(T / n))
    for i in range(T_lfr):
        if m <= T - i * n:
            LFR_inputs.append(np.hstack(data[i*n:i*n+m]))
        else:
            # Not enough frames left for a full stack: pad by repeating the last frame.
            num_padding = m - (T - i * n)
            frame = np.hstack(data[i*n:])
            for _ in range(num_padding):
                frame = np.hstack((frame, data[-1]))
            LFR_inputs.append(frame)
    return np.vstack(LFR_inputs)
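A small worked example (m and n chosen only for illustration):

import numpy as np
from funasr_local.datasets.large_datasets.utils.low_frame_rate import build_LFR_features

feats = np.arange(10 * 4, dtype=np.float32).reshape(10, 4)  # T = 10 frames, D = 4 dims
lfr = build_LFR_features(feats, m=4, n=3)
# T_lfr = ceil(10 / 3) = 4 output frames, each the concatenation of 4 input frames,
# so lfr.shape == (4, 16); the last output frame repeats frame 9 three times as padding.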
funasr_local/datasets/large_datasets/utils/padding.py (new file, 34 lines)
@@ -0,0 +1,34 @@
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence


def padding(data, float_pad_value=0.0, int_pad_value=-1):
    assert isinstance(data, list)
    assert "key" in data[0]
    assert "speech" in data[0] or "text" in data[0]

    keys = [x["key"] for x in data]

    batch = {}
    data_names = data[0].keys()
    for data_name in data_names:
        if data_name == "key" or data_name == "sampling_rate":
            continue
        else:
            # Integer fields (e.g. token ids) are padded with int_pad_value,
            # float fields (e.g. features) with float_pad_value.
            if data[0][data_name].dtype.kind == "i":
                pad_value = int_pad_value
                tensor_type = torch.int64
            else:
                pad_value = float_pad_value
                tensor_type = torch.float32

            tensor_list = [torch.tensor(np.copy(d[data_name]), dtype=tensor_type) for d in data]
            tensor_lengths = torch.tensor([len(d[data_name]) for d in data], dtype=torch.int32)
            tensor_pad = pad_sequence(tensor_list,
                                      batch_first=True,
                                      padding_value=pad_value)
            batch[data_name] = tensor_pad
            batch[data_name + "_lengths"] = tensor_lengths

    return keys, batch
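For reference, a minimal usage sketch (not part of the commit; the data is illustrative and assumes funasr_local is importable):

import numpy as np
from funasr_local.datasets.large_datasets.utils.padding import padding

samples = [
    {"key": "utt1", "speech": np.random.randn(120, 80).astype(np.float32),
     "text": np.array([5, 7, 9], dtype=np.int64)},
    {"key": "utt2", "speech": np.random.randn(95, 80).astype(np.float32),
     "text": np.array([3, 4], dtype=np.int64)},
]
keys, batch = padding(samples)
# batch["speech"].shape == (2, 120, 80), padded with 0.0
# batch["text"].shape == (2, 3), padded with -1
# batch["speech_lengths"] == [120, 95], batch["text_lengths"] == [3, 2]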
funasr_local/datasets/large_datasets/utils/tokenize.py (new file, 81 lines)
@@ -0,0 +1,81 @@
#!/usr/bin/env python
import re
import numpy as np


def forward_segment(text, seg_dict):
    # Greedy forward maximum matching against the segmentation dictionary.
    word_list = []
    i = 0
    while i < len(text):
        longest_word = text[i]
        for j in range(i + 1, len(text) + 1):
            word = text[i:j]
            if word in seg_dict:
                if len(word) > len(longest_word):
                    longest_word = word
        word_list.append(longest_word)
        i += len(longest_word)
    return word_list


def seg_tokenize(txt, seg_dict):
    # Words made of CJK characters and digits fall back to per-character lookup.
    pattern = re.compile(r'^[\u4E00-\u9FA50-9]+$')
    out_txt = ""
    for word in txt:
        word = word.lower()
        if word in seg_dict:
            out_txt += seg_dict[word] + " "
        else:
            if pattern.match(word):
                for char in word:
                    if char in seg_dict:
                        out_txt += seg_dict[char] + " "
                    else:
                        out_txt += "<unk>" + " "
            else:
                out_txt += "<unk>" + " "
    return out_txt.strip().split()


def tokenize(data,
             vocab=None,
             seg_dict=None,
             punc_dict=None,
             bpe_tokenizer=None):
    assert "text" in data
    assert isinstance(vocab, dict)
    text = data["text"]
    token = []
    vad = -2

    if bpe_tokenizer is not None:
        text = bpe_tokenizer.text2tokens("".join(text))

    if seg_dict is not None:
        assert isinstance(seg_dict, dict)
        text = seg_tokenize(text, seg_dict)

    length = len(text)
    for i in range(length):
        x = text[i]
        # A trailing "vad:<index>" marker carries the VAD split point instead of a token.
        if i == length - 1 and "punc" in data and x.startswith("vad:"):
            vad = x[4:]
            if len(vad) == 0:
                vad = -1
            else:
                vad = int(vad)
        elif x in vocab:
            token.append(vocab[x])
        else:
            token.append(vocab['<unk>'])

    if "punc" in data and punc_dict is not None:
        punc_token = []
        for punc in data["punc"]:
            if punc in punc_dict:
                punc_token.append(punc_dict[punc])
            else:
                punc_token.append(punc_dict["_"])
        data["punc"] = np.array(punc_token)

    data["text"] = np.array(token)
    if vad != -2:
        data["vad_indexes"] = np.array([vad], dtype=np.int64)
    return data
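A minimal sketch of token-id mapping (the vocabulary and text below are purely illustrative):

from funasr_local.datasets.large_datasets.utils.tokenize import tokenize

vocab = {"<unk>": 0, "hello": 1, "world": 2}
data = {"text": ["hello", "world", "foo"]}
out = tokenize(data, vocab=vocab)
# out["text"] is np.array([1, 2, 0]); "foo" is out of vocabulary and maps to <unk>.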