mirror of https://github.com/HumanAIGC/lite-avatar.git

Commit: add files

weights/model_1.onnx (new file, Git LFS pointer)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:312f0213a23a1c17ecb66f9edf8413b8ef193c0d4d2f5f9dd6714a178492c34c
size 184415825

Binary file not shown.
@@ -0,0 +1,503 @@
---
tasks:
- auto-speech-recognition
domain:
- audio
model-type:
- Non-autoregressive
frameworks:
- pytorch
backbone:
- transformer/conformer
metrics:
- CER
license: Apache License 2.0
language:
- cn
tags:
- Paraformer
- Alibaba
- INTERSPEECH 2022
datasets:
  train:
  - 60,000 hour industrial Mandarin task
  test:
  - AISHELL-1 dev/test
  - AISHELL-2 dev_android/dev_ios/dev_mic/test_android/test_ios/test_mic
  - WenetSpeech dev/test_meeting/test_net
  - SpeechIO TIOBE
  - 60,000 hour industrial Mandarin task
indexing:
  results:
  - task:
      name: Automatic Speech Recognition
    dataset:
      name: 60,000 hour industrial Mandarin task
      type: audio # optional
      args: 16k sampling rate, 8404 characters # optional
    metrics:
      - type: CER
        value: 8.53% # float
        description: greedy search, without LM, avg.
        args: default
      - type: RTF
        value: 0.0251 # float
        description: GPU inference on V100
        args: batch_size=1
widgets:
  - task: auto-speech-recognition
    inputs:
      - type: audio
        name: input
        title: Audio
    examples:
      - name: 1
        title: Example 1
        inputs:
          - name: input
            data: git://example/asr_example.wav
    inferencespec:
      cpu: 8 # number of CPUs
      memory: 4096
finetune-support: True
---

# Paraformer-large Model Introduction

## Highlights
- Hotword version: the [Paraformer-large hotword model](https://www.modelscope.cn/models/damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/summary) supports hotword customization: given a user-provided hotword list, it boosts those words to improve their recall and accuracy.
- Long-audio version: the [Paraformer-large long-audio model](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary) integrates VAD, ASR, punctuation, and timestamps; it can transcribe audio that is hours long and outputs punctuated text with timestamps.

## Release Notes

- February 2023 (released February 17): [funasr-0.2.0](https://github.com/alibaba-damo-academy/FunASR/tree/main), modelscope-1.3.0
  - Feature improvements:
    - Added model export: all Paraformer models on ModelScope, as well as locally fine-tuned models, can be exported in one step to [ONNX and TorchScript formats](https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/export) for deployment.
    - Added [ONNX Runtime deployment](https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/runtime/python/onnxruntime/paraformer/rapid_paraformer) for Paraformer models, with no need to install ModelScope or FunASR. Measured on CPU, ONNX Runtime inference is nearly 3x faster (RTF: 0.110 -> 0.038).
    - Added a [gRPC service](https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/runtime/python/grpc) that can serve either the ModelScope inference pipeline or the ONNX Runtime backend.
    - Improved the timestamps of the [Paraformer-large long-audio model](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary): timestamp accuracy on bad cases is markedly better, with an average start/end offset of 74.7 ms ([see the paper](https://arxiv.org/abs/2301.12343)).
    - Added free combination of VAD, ASR, and punctuation models: any models on ModelScope, or locally fine-tuned ones, can be combined for inference ([usage example](https://github.com/alibaba-damo-academy/FunASR/discussions/134)).
    - Improved the [general punctuation model](https://www.modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/summary): higher punctuation recall and precision, and fixes for missing punctuation.
    - Added sampling-rate adaptation (input audio of any sampling rate is automatically matched to the model's sampling rate) and support for more audio formats such as mp3, flac, ogg, and opus.

  - New models:
    - [Paraformer-large hotword model](https://www.modelscope.cn/models/damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/summary): hotword customization; given a hotword list, the listed words are boosted to improve their recall.
    - [MFCCA multi-channel multi-speaker recognition model](https://www.modelscope.cn/models/NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/summary): a multi-channel ASR model based on multi-frame cross-channel attention, from a joint paper with the Audio, Speech and Language Processing Group of NWPU.
    - [8k VAD model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-8k-common/summary): detects the start and end of speech within long recordings; supports streaming input down to 10 ms chunks.
    - UniASR unified streaming/offline models:
      [16k UniASR Minnan](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-minnan-16k-common-vocab3825/summary),
      [16k UniASR French](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-fr-16k-common-vocab3472-tensorflow1-online/summary),
      [16k UniASR German](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-online/summary),
      [16k UniASR Vietnamese](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-online/summary),
      [16k UniASR Persian](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-online/summary).
    - [Data2vec-based unsupervised pre-trained Paraformer model](https://www.modelscope.cn/models/damo/speech_data2vec_pretrain-paraformer-zh-cn-aishell2-16k/summary): a Paraformer model initialized from Data2vec unsupervised pre-training and fine-tuned on AISHELL-1.

- January 2023 (released January 16): [funasr-0.1.6](https://github.com/alibaba-damo-academy/FunASR/tree/main), modelscope-1.2.0
  - New models:
    - [Paraformer-large long-audio model](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary): integrates VAD, ASR, punctuation, and timestamps; can transcribe hours-long audio and output punctuated text with timestamps.
    - [Chinese unsupervised pre-trained Data2vec model](https://www.modelscope.cn/models/damo/speech_data2vec_pretrain-zh-cn-aishell2-16k-pytorch/summary): a Data2vec model pre-trained on AISHELL-2 without supervision, usable for ASR or for fine-tuning on downstream tasks.
    - [16k VAD model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/summary): detects the start and end of speech within long recordings.
    - [Chinese general punctuation model](https://www.modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/summary): punctuation prediction for ASR output text.
    - [8K UniASR streaming model](https://www.modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/summary) and [8K UniASR model](https://www.modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/summary): a unified streaming/offline ASR model that recognizes speech in streaming mode while emitting low-latency offline results to correct the streaming hypotheses.
    - Paraformer-large fine-tuned on [AISHELL-1](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/summary) and on [AISHELL-2](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/summary).
    - [Speaker verification model](https://www.modelscope.cn/models/damo/speech_xvector_sv-zh-cn-cnceleb-16k-spk3465-pytorch/summary): usable for speaker verification and for speaker-embedding extraction.
    - [Small on-device Paraformer command-word model](https://www.modelscope.cn/models/damo/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/summary): a Paraformer-tiny variant that supports command-word recognition with a small parameter budget.
  - Upgraded the original TensorFlow models to PyTorch for inference, with fine-tuning support, including:
    - 16K models: [Paraformer Chinese](https://modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8358-tensorflow1/summary),
      [Paraformer-large Chinese](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8358-tensorflow1/summary),
      [UniASR Chinese](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline/summary),
      [UniASR-large Chinese](https://modelscope.cn/models/damo/speech_UniASR-large_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline/summary),
      [UniASR Chinese streaming model](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/summary),
      [UniASR dialect](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-offline/summary),
      [UniASR dialect streaming model](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online/summary),
      [UniASR Japanese](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline/summary),
      [UniASR Japanese streaming model](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-online/summary),
      [UniASR Indonesian](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-offline/summary),
      [UniASR Indonesian streaming model](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-online/summary),
      [UniASR Portuguese](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline/summary),
      [UniASR Portuguese streaming model](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-online/summary),
      [UniASR English](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-offline/summary),
      [UniASR English streaming model](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-online/summary),
      [UniASR Russian](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-offline/summary),
      [UniASR Russian streaming model](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-online/summary),
      [UniASR Korean](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-offline/summary),
      [UniASR Korean streaming model](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-online/summary),
      [UniASR Spanish](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-offline/summary),
      [UniASR Spanish streaming model](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-online/summary),
      [UniASR Cantonese (simplified)](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-offline/files),
      [UniASR Cantonese (simplified) streaming model](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online/files)
    - 8K models: [Paraformer Chinese](https://modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-8k-common-vocab8358-tensorflow1/summary),
      [UniASR Chinese](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-offline/summary),
      [UniASR Chinese streaming model](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-offline/summary)

[//]: # ( - Feature improvements:)

[//]: # ( - ModelScope inference pipeline: added batch decoding for Paraformer models; added more audio input types, such as wav.scp, audio bytes, audio samples, and WAV format.)

[//]: # ( - Added a ModelScope-based fine-tuning pipeline and faster inference.)

[//]: # ( - [Paraformer-large model](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary): added ModelScope-based fine-tuning and batch decoding for faster inference.)

[//]: # ( - [AISHELL-1 academic Paraformer model](https://modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/summary),)

[//]: # ( [AISHELL-1 academic ParaformerBert model](https://modelscope.cn/models/damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/summary),)

[//]: # ( [AISHELL-1 academic Conformer model](https://modelscope.cn/models/damo/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/summary),)

[//]: # ( [AISHELL-2 academic Paraformer model](https://www.modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary),)

[//]: # ( [AISHELL-2 academic ParaformerBert model](https://www.modelscope.cn/models/damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary),)

[//]: # ( [AISHELL-2 academic Conformer model](https://www.modelscope.cn/models/damo/speech_conformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary):)

[//]: # ( added ModelScope-based fine-tuning; the Paraformer and ParaformerBert models also add batch decoding for faster inference.)


## Project Introduction

Paraformer is an efficient non-autoregressive end-to-end speech recognition framework proposed by the speech team at Alibaba DAMO Academy. This project provides the Paraformer Chinese general-purpose ASR model, trained on tens of thousands of hours of industrial-grade labeled audio, which gives it strong general recognition performance. The model can be used in scenarios such as voice input methods, voice navigation, and intelligent meeting transcription.

<p align="center">
<img src="fig/struct.png" alt="Paraformer model architecture" width="500" />
</p>

As shown above, Paraformer consists of five parts: Encoder, Predictor, Sampler, Decoder, and the loss function. The Encoder can use different network structures, such as self-attention, Conformer, or SAN-M. The Predictor is a two-layer FFN that predicts the number of target tokens and extracts the acoustic embedding of each target token. The Sampler has no learnable parameters; from the acoustic embeddings and the target embeddings it produces semantically enriched feature vectors. The Decoder is similar to its autoregressive counterpart, but it performs bidirectional modeling (autoregressive decoders are unidirectional). The loss function combines the cross-entropy (CE) and MWER discriminative objectives with the MAE objective used to train the Predictor.

Its key ingredients are:
- Predictor: a Continuous Integrate-and-Fire (CIF) based predictor extracts the acoustic embedding of each target token and gives a more accurate estimate of the number of tokens in the utterance (a minimal sketch of the CIF firing rule is given below).
- Sampler: by sampling, it mixes acoustic embeddings with target token embeddings into semantically enriched vectors, which, together with the bidirectional Decoder, strengthen the model's ability to use context.
- An MWER training criterion based on negative-sample sampling.

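The following is a small, illustrative sketch of the CIF firing rule mentioned in the Predictor bullet; it is a simplified toy version, not the implementation used in this project, and the function name, arguments, and toy data are all made up for illustration. Per-frame weights predicted by the Predictor are accumulated, and a token embedding "fires" each time the accumulated weight crosses a threshold of 1.0, with the current frame's weight split across the boundary.

```python
import numpy as np

def cif_fire(encoder_out, alphas, threshold=1.0):
    """Toy Continuous Integrate-and-Fire (CIF).

    encoder_out: (T, D) frame-level acoustic embeddings.
    alphas:      (T,)  per-frame weights predicted by the Predictor.
    Returns an (N, D) array with one integrated embedding per fired token.
    """
    integrated = np.zeros(encoder_out.shape[1])  # running weighted sum of frames
    accum = 0.0                                  # running weight
    fired = []
    for h, a in zip(encoder_out, alphas):
        if accum + a < threshold:
            # not enough weight yet: keep integrating this frame
            accum += a
            integrated += a * h
        else:
            # fire a token: use just enough of this frame's weight to reach the threshold...
            needed = threshold - accum
            fired.append(integrated + needed * h)
            # ...and carry the remaining weight over to the next token
            accum = a - needed
            integrated = accum * h
    return np.stack(fired) if fired else np.empty((0, encoder_out.shape[1]))

# toy usage: 6 frames, 4-dim embeddings; the weights sum to ~2, so ~2 tokens fire
rng = np.random.default_rng(0)
tokens = cif_fire(rng.standard_normal((6, 4)), np.array([0.3, 0.5, 0.4, 0.3, 0.2, 0.3]))
print(tokens.shape)  # (2, 4)
```

During training the per-frame weights are additionally scaled so that they sum to the reference token count, which is what lets the Predictor estimate the number of target tokens (optimized with the MAE loss mentioned above).
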
For more details see:
- Paper: [Paraformer: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition](https://arxiv.org/abs/2206.08317)
- Paper walkthrough (in Chinese): [Paraformer: 高识别率、高计算效率的单轮非自回归端到端语音识别模型](https://mp.weixin.qq.com/s/xQ87isj5_wxWiQs4qUXtVw)


## How to Use the Model and Train Your Own

The pre-trained model provided here is a general-domain model trained on large-scale data. Developers can further adapt it to their own domain either with ModelScope's fine-tuning functionality or with the companion GitHub repository [FunASR](https://github.com/alibaba-damo-academy/FunASR).

### Developing in a Notebook

For development work, we particularly recommend using a Notebook for offline processing. Log in to your ModelScope account and click the "Open in Notebook" button at the top right of the model page; a dialog appears, and on first use you will be prompted to link an Alibaba Cloud account. After linking the account, choose the compute resources, create an instance, and, once the instance is ready, enter the development environment and run the calls below.

#### Inference with ModelScope

- Supported audio input formats:
    - Path to a wav file, e.g. data/test/audios/asr_example.wav
    - Path to a pcm file, e.g. data/test/audios/asr_example.pcm
    - URL of a wav file, e.g. https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav
    - Raw wav bytes, e.g. bytes read directly from a file or recorded from a microphone.
    - Decoded audio, e.g. audio, rate = soundfile.read("asr_example_zh.wav"); the type can be numpy.ndarray or torch.Tensor.
    - A wav.scp file in the following format:

```sh
cat wav.scp
asr_example1 data/test/audios/asr_example1.wav
asr_example2 data/test/audios/asr_example2.wav
...
```

- If the input is a wav file URL, the API can be called as follows:

```python
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch')

rec_result = inference_pipeline(audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
print(rec_result)
```

- If the input audio is in pcm format, pass the sampling rate via the audio_fs argument, for example:

```python
rec_result = inference_pipeline(audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.pcm', audio_fs=16000)
```

- If the input audio is a local wav file, the API can be called as follows:

```python
rec_result = inference_pipeline(audio_in='asr_example_zh.wav')
```

- If the input is a wav.scp file (note: the file name must end in .scp), you can add an output_dir argument to write the recognition results to disk. The API can be called as follows:

```python
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
    output_dir='./output_dir')

inference_pipeline(audio_in="wav.scp")
```

The recognition results are written out with the following directory structure:

```sh
tree output_dir/
output_dir/
└── 1best_recog
    ├── rtf
    ├── score
    ├── text
    └── time_stamp

1 directory, 4 files
```

rtf: statistics of the decoding time (real-time factor)

score: score of the recognized path

text: the recognition results (an assumed layout is sketched below)

time_stamp: the timestamp results

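As a rough orientation only, the text file holds one line per utterance, keyed by the utterance id from wav.scp; the layout below is an assumption based on the usual Kaldi-style convention, not verbatim output from this model:

```sh
# Assumed layout: one "<utterance-id> <recognized text>" line per utterance,
# where <utterance-id> matches the corresponding key in wav.scp.
cat output_dir/1best_recog/text
```
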
- If the input is already-decoded audio, the API can be called as follows:

```python
import soundfile

waveform, sample_rate = soundfile.read("asr_example_zh.wav")
rec_result = inference_pipeline(audio_in=waveform)
```

- Free combination of ASR, VAD, and PUNC models

VAD and punctuation (PUNC) models can be freely combined with the ASR model as needed:
```python
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
    vad_model='damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',
    vad_model_revision="v1.1.8",
    punc_model='damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',
    punc_model_revision="v1.1.6",
)
```
To disable the PUNC model, set punc_model="" or simply omit the punc_model argument. To additionally use an LM, pass lm_model='damo/speech_transformer_lm_zh-cn-common-vocab8404-pytorch' (see the sketch below).

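As an illustration of the note above (a sketch only: the lm_model value is taken verbatim from the note, punc_model='' disables punctuation as described, and the remaining arguments mirror the previous example):

```python
# Sketch based on the note above: punctuation disabled via punc_model='',
# and a Transformer LM added via the lm_model argument.
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
    vad_model='damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',
    punc_model='',  # skip punctuation restoration
    lm_model='damo/speech_transformer_lm_zh-cn-common-vocab8404-pytorch',
)
```
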
The long-audio model enables timestamps by default. To disable them, pass param_dict['use_timestamp'] = False:
```python
param_dict = {'use_timestamp': False}
rec_result = inference_pipeline(audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav', param_dict=param_dict)
```


#### Fine-tuning with ModelScope

- Fine-tuning on a ModelScope dataset:

Take [AISHELL-1](https://www.modelscope.cn/datasets/speech_asr/speech_asr_aishell1_trainsets/summary) as an example. The full dataset is hosted on ModelScope and can be found by its English name (speech_asr_aishell1_trainsets):

```python
import os
from modelscope.metainfo import Trainers
from modelscope.trainers import build_trainer
from modelscope.msdatasets.audio.asr_dataset import ASRDataset

def modelscope_finetune(params):
    if not os.path.exists(params.output_dir):
        os.makedirs(params.output_dir, exist_ok=True)
    # dataset split ["train", "validation"]
    ds_dict = ASRDataset.load(params.data_path, namespace='speech_asr')
    kwargs = dict(
        model=params.model,
        data_dir=ds_dict,
        dataset_type=params.dataset_type,
        work_dir=params.output_dir,
        batch_bins=params.batch_bins,
        max_epoch=params.max_epoch,
        lr=params.lr)
    trainer = build_trainer(Trainers.speech_asr_trainer, default_args=kwargs)
    trainer.train()


if __name__ == '__main__':
    from funasr.utils.modelscope_param import modelscope_args
    params = modelscope_args(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
    params.output_dir = "./checkpoint"                  # checkpoint save path
    params.data_path = "speech_asr_aishell1_trainsets"  # data path: a ModelScope dataset name or a local path
    params.dataset_type = "small"                       # "small" for small datasets; use "large" for more than 1000 hours
    params.batch_bins = 2000                            # batch size: fbank frames if dataset_type="small", milliseconds if dataset_type="large"
    params.max_epoch = 50                               # maximum number of training epochs
    params.lr = 0.00005                                 # learning rate

    modelscope_finetune(params)
```


Save the code above as a Python file (e.g. finetune.py) and run it with python finetune.py. To train with multiple GPUs, use a command like:

```sh
CUDA_VISIBLE_DEVICES=1,2 python -m torch.distributed.launch --nproc_per_node 2 finetune.py > log.txt 2>&1
```

- Fine-tuning on a private dataset:

Simply point data_path at your local data directory:
```python
params.data_path = "./example_data"  # local data directory, laid out as shown below
```

A private dataset should be laid out as follows:
```sh
tree ./example_data/
./example_data/
├── validation
│   ├── text
│   └── wav.scp
└── train
    ├── text
    └── wav.scp

2 directories, 4 files
```

The text file holds the transcripts and the wav.scp file holds absolute paths to the wav audio, for example:

```sh
cat ./example_data/text
BAC009S0002W0122 而 对 楼 市 成 交 抑 制 作 用 最 大 的 限 购
BAC009S0002W0123 也 成 为 地 方 政 府 的 眼 中 钉

cat ./example_data/wav.scp
BAC009S0002W0122 /mnt/data/wav/train/S0002/BAC009S0002W0122.wav
BAC009S0002W0123 /mnt/data/wav/train/S0002/BAC009S0002W0123.wav
```


### Developing on a Local Machine

#### Fine-tuning and inference with ModelScope

Fine-tuning and inference on ModelScope datasets or on private datasets work the same way as in the Notebook workflow described above.

#### Fine-tuning and inference with FunASR

The FunASR framework supports training and fine-tuning of the industrial-grade speech recognition models open-sourced on ModelScope, making ASR research and production easier for researchers and developers. It is open source on GitHub: https://github.com/alibaba-damo-academy/FunASR . If you run into any problems, feel free to contact us: [contact information](https://github.com/alibaba-damo-academy/FunASR/blob/main/docs/images/dingding.jpg)

#### Installing FunASR

- Install FunASR and ModelScope ([details](https://github.com/alibaba-damo-academy/FunASR/wiki))

```sh
pip install "modelscope[audio_asr]" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
git clone https://github.com/alibaba/FunASR.git
cd FunASR
pip install --editable ./
```

#### Inference with FunASR

The following uses a private dataset as the example to show how to run inference and fine-tuning with Paraformer-large in the FunASR framework.

```sh
cd egs_modelscope/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
python infer.py
```

#### Fine-tuning with FunASR
```sh
cd egs_modelscope/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
python finetune.py
```

To change the output path, data path, sampling rate, batch_size, or other settings, or to use multi-GPU training, edit the configuration in finetune.py, following the private-data fine-tuning code in the Notebook section above.


## Benchmark

Trained on large-scale data with a large, well-optimized model, Paraformer achieves state-of-the-art results on a series of speech recognition benchmarks. Below we report results on the academic datasets AISHELL-1, AISHELL-2, and WenetSpeech, and on the SpeechIO TIOBE white-box test sets. On the Chinese ASR evaluation tasks commonly used in academia, it clearly outperforms results published to date and models trained only on the individual closed datasets.

### AISHELL-1

| AISHELL-1 test (CER %) | w/o LM | w/ LM |
|:----------------------:|:------:|:-----:|
| Espnet                 | 4.90   | 4.70  |
| Wenet                  | 4.61   | 4.36  |
| K2                     | -      | 4.26  |
| Blockformer            | 4.29   | 4.05  |
| Paraformer-large       | 1.95   | 1.68  |

### AISHELL-2

| CER (%)          | dev_ios | test_android | test_ios | test_mic |
|:----------------:|:-------:|:------------:|:--------:|:--------:|
| Espnet           | 5.40    | 6.10         | 5.70     | 6.10     |
| WeNet            | -       | -            | 5.39     | -        |
| Paraformer-large | 2.80    | 3.13         | 2.85     | 3.06     |


### WenetSpeech

| CER (%)          | dev  | test_meeting | test_net |
|:----------------:|:----:|:------------:|:--------:|
| Espnet           | 9.70 | 15.90        | 8.80     |
| WeNet            | 8.60 | 17.34        | 9.26     |
| K2               | 7.76 | 13.41        | 8.71     |
| Paraformer-large | 3.57 | 6.97         | 6.74     |

### SpeechIO TIOBE

With shallow fusion of a Transformer LM, Paraformer-large achieves state-of-the-art results on the SpeechIO TIOBE white-box test sets. The [Transformer-LM model](https://modelscope.cn/models/damo/speech_transformer_lm_zh-cn-common-vocab8404-pytorch/summary) is open-sourced on ModelScope. The results below compare decoding without an LM against decoding with the Transformer LM via shallow fusion (sketched after the decode configs):

- Decode config w/o LM:
  - Decode without LM
  - Beam size: 1
- Decode config w/ LM:
  - Decode with [Transformer-LM](https://modelscope.cn/models/damo/speech_transformer_lm_zh-cn-common-vocab8404-pytorch/summary)
  - Beam size: 10
  - LM weight: 0.15

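For readers unfamiliar with shallow fusion: during beam search, the external LM's log-probability is simply added to the ASR model's score with a small weight (0.15 in the configuration above). The sketch below shows only that scoring rule; it is illustrative and not the project's decoder code, and the function name is ours.

```python
def shallow_fusion_score(asr_logprob: float, lm_logprob: float, lm_weight: float = 0.15) -> float:
    # Hypothesis score used to rank beam candidates: ASR log-probability plus
    # the external LM log-probability scaled by lm_weight.
    return asr_logprob + lm_weight * lm_logprob
```
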
| testset (CER %)      | w/o LM | w/ LM |
|:--------------------:|:------:|:-----:|
| SPEECHIO_ASR_ZH00001 | 0.49   | 0.35  |
| SPEECHIO_ASR_ZH00002 | 3.23   | 2.86  |
| SPEECHIO_ASR_ZH00003 | 1.13   | 0.80  |
| SPEECHIO_ASR_ZH00004 | 1.33   | 1.10  |
| SPEECHIO_ASR_ZH00005 | 1.41   | 1.18  |
| SPEECHIO_ASR_ZH00006 | 5.25   | 4.85  |
| SPEECHIO_ASR_ZH00007 | 5.51   | 4.97  |
| SPEECHIO_ASR_ZH00008 | 3.69   | 3.18  |
| SPEECHIO_ASR_ZH00009 | 3.02   | 2.78  |
| SPEECHIO_ASR_ZH00010 | 3.35   | 2.99  |
| SPEECHIO_ASR_ZH00011 | 1.54   | 1.25  |
| SPEECHIO_ASR_ZH00012 | 2.06   | 1.68  |
| SPEECHIO_ASR_ZH00013 | 2.57   | 2.25  |
| SPEECHIO_ASR_ZH00014 | 3.86   | 3.08  |
| SPEECHIO_ASR_ZH00015 | 3.34   | 2.67  |

## Usage and Scope

Runtime environment
- Currently runs only on Linux-x86_64; macOS and Windows are not supported.

How to use
- Direct inference: decode input audio directly and output the transcript.
- Fine-tuning: load the pre-trained model and continue training on private or open-source data.

Scope and target scenarios
- Suitable for offline speech recognition, such as transcription of recorded files; GPU inference works best. Input utterances of at most 20 s are recommended. For long audio, use the [Paraformer-large long-audio model](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary), which integrates VAD, ASR, punctuation, and timestamps, can transcribe hours-long audio directly, and outputs punctuated text with timestamps.

## Model Limitations and Possible Bias

Differences in feature-extraction pipelines and tools, and in training tools, may cause small differences in CER (< 0.1%); differences in the GPU environment used for inference may change the measured RTF (defined in the sketch below).

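For reference, the RTF figures quoted in this card (e.g. 0.0251 in the metadata above) follow the usual definition of the real-time factor: processing time divided by audio duration. A minimal sketch (the function name is ours, and the numbers are just an arithmetic illustration):

```python
def real_time_factor(processing_seconds: float, audio_seconds: float) -> float:
    # RTF < 1 means decoding runs faster than real time.
    return processing_seconds / audio_seconds

# e.g. 10 s of audio decoded in 0.251 s gives RTF = 0.0251
print(real_time_factor(0.251, 10.0))
```
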
## Related Papers and Citation

```BibTeX
@inproceedings{gao2022paraformer,
  title={Paraformer: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition},
  author={Gao, Zhifu and Zhang, Shiliang and McLoughlin, Ian and Yan, Zhijie},
  booktitle={INTERSPEECH},
  year={2022}
}
```
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
@@ -0,0 +1,24 @@
{
    "framework": "pytorch",
    "task" : "auto-speech-recognition",
    "model" : {
        "type" : "generic-asr",
        "am_model_name" : "model.pb",
        "model_config" : {
            "type": "pytorch",
            "code_base": "funasr",
            "mode": "paraformer",
            "lang": "zh-cn",
            "batch_size": 1,
            "am_model_config": "config.yaml",
            "lm_model_name" : "lm/lm.pb",
            "lm_model_config": "lm/lm.yaml",
            "asr_model_config": "decoding.yaml",
            "mvn_file": "am.mvn",
            "model": "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
        }
    },
    "pipeline": {
        "type":"asr-inference"
    }
}
@@ -0,0 +1,6 @@
beam_size: 1
penalty: 0.0
maxlenratio: 0.0
minlenratio: 0.0
ctc_weight: 0.0
lm_weight: 0.0
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,37 @@
# minibatch related
# dataset_type: small
batch_type: length
batch_bins: 2000
num_workers: 16
speech_length_min: 100
speech_length_max: 15000
# dataset_type: large
dataset_conf:
    data_types: sound,text
    filter_conf:
        speech_length_min: 100
        speech_length_max: 15000
        token_length_min: 0
        token_length_max: 200
    shuffle: true
    shuffle_conf:
        shuffle_size: 2048
        sort_size: 500
    batch_conf:
        batch_type: 'token'
        batch_size: 120000
    num_workers: 16

# optimization related
accum_grad: 1
grad_clip: 5
max_epoch: 20
keep_nbest_models: 10
optim: adam
optim_conf:
    lr: 0.0005
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 30000

log_interval: 50
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5b59db5f607fde57bd8d6c8df8f22ce5b37ec7adcdb7b4ee785a4478f06c1c50
size 237373385
File diff suppressed because it is too large
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5bba782a5e9196166233b9ab12ba04cadff9ef9212b4ff6153ed9290ff679025
size 880502012
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user