Merge pull request #8 from snakers4/sontref

Add Number Detector + utils
This commit is contained in:
Alexander Veysov
2020-12-30 21:16:47 +03:00
committed by GitHub
7 changed files with 400 additions and 29 deletions

BIN
files/en_num.wav Normal file

Binary file not shown.

BIN
files/number_detector.jit Normal file

Binary file not shown.

BIN
files/number_detector.onnx Normal file

Binary file not shown.

BIN
files/ru_num.wav Normal file

Binary file not shown.

View File

@@ -2,11 +2,13 @@ dependencies = ['torch', 'torchaudio']
import torch import torch
from utils import (init_jit_model, from utils import (init_jit_model,
get_speech_ts, get_speech_ts,
get_number_ts,
save_audio, save_audio,
read_audio, read_audio,
state_generator, state_generator,
single_audio_stream, single_audio_stream,
collect_speeches) collect_chunks,
drop_chunks)
def silero_vad(**kwargs): def silero_vad(**kwargs):
@@ -21,6 +23,22 @@ def silero_vad(**kwargs):
read_audio, read_audio,
state_generator, state_generator,
single_audio_stream, single_audio_stream,
collect_speeches) collect_chunks)
return model, utils
def silero_number_detector(**kwargs):
    """Silero Number Detector and Language Classifier.

    Loads the TorchScript number-detector model from the torch.hub cache
    and returns it together with the helper utilities needed to use it.

    Returns:
        tuple: (model, utils) where utils is
            (get_number_ts, save_audio, read_audio, collect_chunks, drop_chunks).

    Please see https://github.com/snakers4/silero-vad for usage examples.
    """
    # The .jit file ships with the repo checkout that torch.hub places under its cache dir.
    hub_dir = torch.hub.get_dir()
    model = init_jit_model(model_path=f'{hub_dir}/snakers4_silero-vad_master/files/number_detector.jit')
    utils = (get_number_ts,
             save_audio,
             read_audio,
             collect_chunks,
             drop_chunks)
    return model, utils

354
silero-vad.ipynb Normal file → Executable file
View File

@@ -6,16 +6,26 @@
"id": "sVNOuHQQjsrp" "id": "sVNOuHQQjsrp"
}, },
"source": [ "source": [
"# PyTorch Example" "# PyTorch Examples"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": { "metadata": {
"id": "9ZTzCtc5kYVg" "heading_collapsed": true
}, },
"source": [ "source": [
"## Install Dependencies" "## VAD"
]
},
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true,
"hidden": true
},
"source": [
"### Install Dependencies"
] ]
}, },
{ {
@@ -23,12 +33,10 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"ExecuteTime": { "ExecuteTime": {
"end_time": "2020-12-15T14:00:15.701867Z", "end_time": "2020-12-30T17:35:43.397137Z",
"start_time": "2020-12-15T14:00:09.512876Z" "start_time": "2020-12-30T17:33:10.962078Z"
}, },
"cellView": "form", "hidden": true
"collapsed": true,
"id": "rllMjjsekbjt"
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
@@ -53,7 +61,7 @@
" read_audio,\n", " read_audio,\n",
" state_generator,\n", " state_generator,\n",
" single_audio_stream,\n", " single_audio_stream,\n",
" collect_speeches) = utils\n", " collect_chunks) = utils\n",
"\n", "\n",
"files_dir = torch.hub.get_dir() + '/snakers4_silero-vad_master/files'" "files_dir = torch.hub.get_dir() + '/snakers4_silero-vad_master/files'"
] ]
@@ -61,10 +69,12 @@
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": { "metadata": {
"heading_collapsed": true,
"hidden": true,
"id": "fXbbaUO3jsrw" "id": "fXbbaUO3jsrw"
}, },
"source": [ "source": [
"## Full Audio" "### Full Audio"
] ]
}, },
{ {
@@ -72,9 +82,10 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"ExecuteTime": { "ExecuteTime": {
"end_time": "2020-12-15T13:09:56.879818Z", "end_time": "2020-12-30T17:35:44.362860Z",
"start_time": "2020-12-15T13:09:56.864765Z" "start_time": "2020-12-30T17:35:43.398441Z"
}, },
"hidden": true,
"id": "aI_eydBPjsrx" "id": "aI_eydBPjsrx"
}, },
"outputs": [], "outputs": [],
@@ -91,26 +102,29 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"ExecuteTime": { "ExecuteTime": {
"end_time": "2020-12-15T13:09:58.941063Z", "end_time": "2020-12-30T17:35:44.419280Z",
"start_time": "2020-12-15T13:09:58.887006Z" "start_time": "2020-12-30T17:35:44.364175Z"
}, },
"hidden": true,
"id": "OuEobLchjsry" "id": "OuEobLchjsry"
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"# merge all speech chunks to one audio\n", "# merge all speech chunks to one audio\n",
"save_audio('only_speech.wav',\n", "save_audio('only_speech.wav',\n",
" collect_speeches(speech_timestamps, wav), 16000) \n", " collect_chunks(speech_timestamps, wav), 16000) \n",
"Audio('only_speech.wav')" "Audio('only_speech.wav')"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": { "metadata": {
"heading_collapsed": true,
"hidden": true,
"id": "iDKQbVr8jsry" "id": "iDKQbVr8jsry"
}, },
"source": [ "source": [
"## Single Audio Stream" "### Single Audio Stream"
] ]
}, },
{ {
@@ -121,6 +135,7 @@
"end_time": "2020-12-15T13:09:59.199321Z", "end_time": "2020-12-15T13:09:59.199321Z",
"start_time": "2020-12-15T13:09:59.196823Z" "start_time": "2020-12-15T13:09:59.196823Z"
}, },
"hidden": true,
"id": "q-lql_2Wjsry" "id": "q-lql_2Wjsry"
}, },
"outputs": [], "outputs": [],
@@ -135,10 +150,12 @@
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": { "metadata": {
"heading_collapsed": true,
"hidden": true,
"id": "KBDVybJCjsrz" "id": "KBDVybJCjsrz"
}, },
"source": [ "source": [
"## Multiple Audio Streams" "### Multiple Audio Streams"
] ]
}, },
{ {
@@ -149,6 +166,7 @@
"end_time": "2020-12-15T13:10:03.590358Z", "end_time": "2020-12-15T13:10:03.590358Z",
"start_time": "2020-12-15T13:10:03.587071Z" "start_time": "2020-12-15T13:10:03.587071Z"
}, },
"hidden": true,
"id": "BK4tGfWgjsrz" "id": "BK4tGfWgjsrz"
}, },
"outputs": [], "outputs": [],
@@ -165,6 +183,7 @@
"end_time": "2020-12-15T13:10:15.762491Z", "end_time": "2020-12-15T13:10:15.762491Z",
"start_time": "2020-12-15T13:10:03.591388Z" "start_time": "2020-12-15T13:10:03.591388Z"
}, },
"hidden": true,
"id": "v1l8sam1jsrz" "id": "v1l8sam1jsrz"
}, },
"outputs": [], "outputs": [],
@@ -174,6 +193,125 @@
" pprint(batch)" " pprint(batch)"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true
},
"source": [
"## Number detector"
]
},
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true,
"hidden": true
},
"source": [
"### Install Dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"#@title Install and Import Dependencies\n",
"\n",
"# this assumes that you have a relevant version of PyTorch installed\n",
"!pip install -q torchaudio soundfile\n",
"\n",
"import glob\n",
"import torch\n",
"torch.set_num_threads(1)\n",
"\n",
"from IPython.display import Audio\n",
"from pprint import pprint\n",
"\n",
"model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',\n",
" model='silero_number_detector',\n",
" force_reload=True)\n",
"\n",
"(get_number_ts,\n",
" save_audio,\n",
" read_audio,\n",
" collect_chunks,\n",
" drop_chunks) = utils\n",
"\n",
"files_dir = torch.hub.get_dir() + '/snakers4_silero-vad_master/files'"
]
},
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true,
"hidden": true
},
"source": [
"### Full Audio"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"wav = read_audio(f'{files_dir}/en_num.wav')\n",
"# get number timestamps from full audio file\n",
"number_timestamps = get_number_ts(wav, model)\n",
"pprint(number_timestamps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"sample_rate = 16000\n",
"# convert ms in timestamps to samples\n",
"for timestamp in number_timestamps:\n",
" timestamp['start'] = int(timestamp['start'] * sample_rate)\n",
" timestamp['end'] = int(timestamp['end'] * sample_rate)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"# merge all number chunks to one audio\n",
"save_audio('only_numbers.wav',\n",
" collect_chunks(number_timestamps, wav), sample_rate) \n",
"Audio('only_numbers.wav')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"# drop all number chunks from audio\n",
"save_audio('no_numbers.wav',\n",
" drop_chunks(number_timestamps, wav), sample_rate) \n",
"Audio('no_numbers.wav')"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": { "metadata": {
@@ -186,10 +324,21 @@
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": { "metadata": {
"heading_collapsed": true
},
"source": [
"## VAD"
]
},
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true,
"hidden": true,
"id": "bL4kn4KJrlyL" "id": "bL4kn4KJrlyL"
}, },
"source": [ "source": [
"## Install Dependencies" "### Install Dependencies"
] ]
}, },
{ {
@@ -197,6 +346,7 @@
"execution_count": null, "execution_count": null,
"metadata": { "metadata": {
"cellView": "form", "cellView": "form",
"hidden": true,
"id": "Q4QIfSpprnkI" "id": "Q4QIfSpprnkI"
}, },
"outputs": [], "outputs": [],
@@ -239,10 +389,12 @@
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": { "metadata": {
"heading_collapsed": true,
"hidden": true,
"id": "5JHErdB7jsr0" "id": "5JHErdB7jsr0"
}, },
"source": [ "source": [
"## Full Audio" "### Full Audio"
] ]
}, },
{ {
@@ -253,6 +405,7 @@
"end_time": "2020-12-15T13:09:06.643812Z", "end_time": "2020-12-15T13:09:06.643812Z",
"start_time": "2020-12-15T13:09:06.473386Z" "start_time": "2020-12-15T13:09:06.473386Z"
}, },
"hidden": true,
"id": "krnGoA6Kjsr0" "id": "krnGoA6Kjsr0"
}, },
"outputs": [], "outputs": [],
@@ -273,22 +426,25 @@
"end_time": "2020-12-15T13:09:08.862421Z", "end_time": "2020-12-15T13:09:08.862421Z",
"start_time": "2020-12-15T13:09:08.820014Z" "start_time": "2020-12-15T13:09:08.820014Z"
}, },
"hidden": true,
"id": "B176Lzfnjsr1" "id": "B176Lzfnjsr1"
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"# merge all speech chunks to one audio\n", "# merge all speech chunks to one audio\n",
"save_audio('only_speech.wav', collect_speeches(speech_timestamps, wav), 16000)\n", "save_audio('only_speech.wav', collect_chunks(speech_timestamps, wav), 16000)\n",
"Audio('only_speech.wav')" "Audio('only_speech.wav')"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": { "metadata": {
"heading_collapsed": true,
"hidden": true,
"id": "Rio9W50gjsr1" "id": "Rio9W50gjsr1"
}, },
"source": [ "source": [
"## Single Audio Stream" "### Single Audio Stream"
] ]
}, },
{ {
@@ -299,6 +455,7 @@
"end_time": "2020-12-15T13:09:09.606031Z", "end_time": "2020-12-15T13:09:09.606031Z",
"start_time": "2020-12-15T13:09:09.504239Z" "start_time": "2020-12-15T13:09:09.504239Z"
}, },
"hidden": true,
"id": "IPkl8Yy1jsr1" "id": "IPkl8Yy1jsr1"
}, },
"outputs": [], "outputs": [],
@@ -315,6 +472,7 @@
"end_time": "2020-12-15T13:09:11.453171Z", "end_time": "2020-12-15T13:09:11.453171Z",
"start_time": "2020-12-15T13:09:09.633435Z" "start_time": "2020-12-15T13:09:09.633435Z"
}, },
"hidden": true,
"id": "NC6Jim0hjsr1" "id": "NC6Jim0hjsr1"
}, },
"outputs": [], "outputs": [],
@@ -327,10 +485,12 @@
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": { "metadata": {
"heading_collapsed": true,
"hidden": true,
"id": "WNZ42u0ajsr1" "id": "WNZ42u0ajsr1"
}, },
"source": [ "source": [
"## Multiple Audio Streams" "### Multiple Audio Streams"
] ]
}, },
{ {
@@ -341,6 +501,7 @@
"end_time": "2020-12-15T13:09:11.540423Z", "end_time": "2020-12-15T13:09:11.540423Z",
"start_time": "2020-12-15T13:09:11.455706Z" "start_time": "2020-12-15T13:09:11.455706Z"
}, },
"hidden": true,
"id": "XjhGQGppjsr1" "id": "XjhGQGppjsr1"
}, },
"outputs": [], "outputs": [],
@@ -358,6 +519,7 @@
"end_time": "2020-12-15T13:09:19.565434Z", "end_time": "2020-12-15T13:09:19.565434Z",
"start_time": "2020-12-15T13:09:11.552097Z" "start_time": "2020-12-15T13:09:11.552097Z"
}, },
"hidden": true,
"id": "QI7-arlqjsr2" "id": "QI7-arlqjsr2"
}, },
"outputs": [], "outputs": [],
@@ -366,6 +528,154 @@
" if batch:\n", " if batch:\n",
" pprint(batch)" " pprint(batch)"
] ]
},
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true
},
"source": [
"## Number detector"
]
},
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true,
"hidden": true,
"id": "bL4kn4KJrlyL"
},
"source": [
"### Install Dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2020-12-30T17:25:19.107534Z",
"start_time": "2020-12-30T17:24:51.853293Z"
},
"cellView": "form",
"hidden": true,
"id": "Q4QIfSpprnkI"
},
"outputs": [],
"source": [
"#@title Install and Import Dependencies\n",
"\n",
"# this assumes that you have a relevant version of PyTorch installed\n",
"!pip install -q torchaudio soundfile onnxruntime\n",
"\n",
"import glob\n",
"import torch\n",
"import onnxruntime\n",
"from pprint import pprint\n",
"\n",
"from IPython.display import Audio\n",
"\n",
"_, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',\n",
" model='silero_number_detector',\n",
" force_reload=True)\n",
"\n",
"(get_number_ts,\n",
" save_audio,\n",
" read_audio,\n",
" collect_chunks,\n",
" drop_chunks) = utils\n",
"\n",
"files_dir = torch.hub.get_dir() + '/snakers4_silero-vad_master/files'\n",
"\n",
"def init_onnx_model(model_path: str):\n",
" return onnxruntime.InferenceSession(model_path)\n",
"\n",
"def validate_onnx(model, inputs):\n",
" with torch.no_grad():\n",
" ort_inputs = {'input': inputs.cpu().numpy()}\n",
" outs = model.run(None, ort_inputs)\n",
" outs = [torch.Tensor(x) for x in outs]\n",
" return outs"
]
},
{
"cell_type": "markdown",
"metadata": {
"hidden": true,
"id": "5JHErdB7jsr0"
},
"source": [
"### Full Audio"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2020-12-15T13:09:06.643812Z",
"start_time": "2020-12-15T13:09:06.473386Z"
},
"hidden": true,
"id": "krnGoA6Kjsr0"
},
"outputs": [],
"source": [
"model = init_onnx_model(f'{files_dir}/number_detector.onnx')\n",
"wav = read_audio(f'{files_dir}/en_num.wav')\n",
"\n",
"# get number timestamps from full audio file\n",
"number_timestamps = get_number_ts(wav, model, run_function=validate_onnx)\n",
"pprint(number_timestamps)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"sample_rate = 16000\n",
"# convert ms in timestamps to samples\n",
"for timestamp in number_timestamps:\n",
" timestamp['start'] = int(timestamp['start'] * sample_rate)\n",
" timestamp['end'] = int(timestamp['end'] * sample_rate)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2020-12-15T13:09:08.862421Z",
"start_time": "2020-12-15T13:09:08.820014Z"
},
"hidden": true,
"id": "B176Lzfnjsr1"
},
"outputs": [],
"source": [
"# merge all number chunks to one audio\n",
"save_audio('only_numbers.wav',\n",
" collect_chunks(number_timestamps, wav), 16000) \n",
"Audio('only_numbers.wav')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"# drop all number chunks from audio\n",
"save_audio('no_numbers.wav',\n",
" drop_chunks(number_timestamps, wav), 16000) \n",
"Audio('no_numbers.wav')"
]
} }
], ],
"metadata": { "metadata": {

View File

@@ -105,6 +105,39 @@ def get_speech_ts(wav: torch.Tensor,
return speeches return speeches
def get_number_ts(wav: torch.Tensor,
                  model,
                  model_stride=8,
                  hop_length=160,
                  sample_rate=16000,
                  run_function=validate):
    """Return timestamps (in seconds) of segments that contain spoken numbers.

    Args:
        wav: 1-D audio tensor (mono waveform samples).
        model: number-detector model consumed by ``run_function``.
        model_stride: number of real frames covered by one strided model frame.
        hop_length: samples per frame at ``sample_rate``.
        sample_rate: audio sample rate in Hz.
        run_function: callable ``(model, batch) -> logits`` used for inference.

    Returns:
        list[dict]: each dict has ``'start'`` and ``'end'`` keys in seconds.
    """
    # Keep the original sample count before adding the batch dimension —
    # after unsqueeze, len(wav) would be the batch size (1), not the length.
    num_samples = wav.shape[-1]
    wav = torch.unsqueeze(wav, dim=0)
    perframe_logits = run_function(model, wav)[0]
    # (1, num_frames_strided): per-strided-frame class predictions (1 == number present)
    perframe_preds = torch.argmax(torch.softmax(perframe_logits, dim=1), dim=1).squeeze()
    extended_preds = []
    # Repeat each strided prediction so there is one label per real audio frame.
    for i in perframe_preds:
        extended_preds.extend([i.item()] * model_stride)
    # len(extended_preds) is *num_frames_real*; for each frame of audio we know if it has a number in it.
    triggered = False
    timings = []
    cur_timing = {}
    for i, pred in enumerate(extended_preds):
        if pred == 1:
            if not triggered:
                cur_timing['start'] = (i * hop_length) / sample_rate
                triggered = True
        elif pred == 0:
            if triggered:
                cur_timing['end'] = (i * hop_length) / sample_rate
                timings.append(cur_timing)
                cur_timing = {}
                triggered = False
    if cur_timing:
        # Segment ran to the end of the audio; close it at the true audio length.
        # (Bug fix: previously used len(wav) which is 1 after unsqueeze.)
        cur_timing['end'] = num_samples / sample_rate
        timings.append(cur_timing)
    return timings
class VADiterator: class VADiterator:
def __init__(self, def __init__(self,
trig_sum: float = 0.26, trig_sum: float = 0.26,
@@ -252,9 +285,19 @@ def single_audio_stream(model,
yield states yield states
def collect_chunks(tss: List[dict],
                   wav: torch.Tensor):
    """Concatenate the regions of ``wav`` described by ``tss`` into one tensor.

    Args:
        tss: list of dicts with integer ``'start'``/``'end'`` sample indices.
        wav: 1-D audio tensor to slice.

    Returns:
        torch.Tensor: the selected chunks joined end-to-end.
    """
    chunks = [wav[i['start']: i['end']] for i in tss]
    return torch.cat(chunks)
def drop_chunks(tss: List[dict],
                wav: torch.Tensor):
    """Return ``wav`` with the regions described by ``tss`` removed.

    Args:
        tss: list of dicts with integer ``'start'``/``'end'`` sample indices,
            assumed sorted and non-overlapping.
        wav: 1-D audio tensor to filter.

    Returns:
        torch.Tensor: the audio outside the given chunks, joined end-to-end.
    """
    chunks = []
    cur_start = 0
    for i in tss:
        chunks.append(wav[cur_start: i['start']])
        cur_start = i['end']
    # Bug fix: keep the audio after the last dropped chunk (previously
    # discarded), which also makes an empty ``tss`` return the full wav
    # instead of crashing in torch.cat on an empty list.
    chunks.append(wav[cur_start:])
    return torch.cat(chunks)