collab fx

adamnsandle committed 2021-12-07 10:54:50 +00:00
parent 8af246df49
commit f638c47595
2 changed files with 660 additions and 661 deletions

silero-vad.ipynb

@@ -1,42 +1,4 @@
 {
-"nbformat": 4,
-"nbformat_minor": 0,
-"metadata": {
-"colab": {
-"name": "silero-vad.ipynb",
-"provenance": []
-},
-"kernelspec": {
-"display_name": "Python 3",
-"language": "python",
-"name": "python3"
-},
-"language_info": {
-"codemirror_mode": {
-"name": "ipython",
-"version": 3
-},
-"file_extension": ".py",
-"mimetype": "text/x-python",
-"name": "python",
-"nbconvert_exporter": "python",
-"pygments_lexer": "ipython3",
-"version": "3.8.8"
-},
-"toc": {
-"base_numbering": 1,
-"nav_menu": {},
-"number_sections": true,
-"sideBar": true,
-"skip_h1_title": false,
-"title_cell": "Table of Contents",
-"title_sidebar": "Contents",
-"toc_cell": false,
-"toc_position": {},
-"toc_section_display": true,
-"toc_window_display": false
-}
-},
 "cells": [
 {
 "cell_type": "markdown",
@@ -68,15 +30,17 @@
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "hidden": true,
 "id": "5w5AkskZ2Fwr"
 },
+"outputs": [],
 "source": [
 "#@title Install and Import Dependencies\n",
 "\n",
 "# this assumes that you have a relevant version of PyTorch installed\n",
-"!pip install -q torchaudio soundfile\n",
+"!pip install -q torchaudio\n",
 "\n",
 "SAMPLE_RATE = 16000\n",
 "\n",
@@ -98,9 +62,7 @@
" collect_chunks) = utils\n", " collect_chunks) = utils\n",
"\n", "\n",
"files_dir = torch.hub.get_dir() + '/snakers4_silero-vad_master/files'" "files_dir = torch.hub.get_dir() + '/snakers4_silero-vad_master/files'"
], ]
"execution_count": null,
"outputs": []
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
@@ -122,31 +84,31 @@
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "id": "aI_eydBPjsrx"
 },
+"outputs": [],
 "source": [
 "wav = read_audio(f'{files_dir}/en.wav', sampling_rate=SAMPLE_RATE)\n",
 "# get speech timestamps from full audio file\n",
 "speech_timestamps = get_speech_timestamps(wav, model, sampling_rate=SAMPLE_RATE)\n",
 "pprint(speech_timestamps)"
-],
-"execution_count": null,
-"outputs": []
+]
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "id": "OuEobLchjsry"
 },
+"outputs": [],
 "source": [
 "# merge all speech chunks to one audio\n",
 "save_audio('only_speech.wav',\n",
 " collect_chunks(speech_timestamps, wav), sampling_rate=16000) \n",
 "Audio('only_speech.wav')"
-],
-"execution_count": null,
-"outputs": []
+]
 },
 {
 "cell_type": "markdown",
@@ -154,19 +116,21 @@
"id": "iDKQbVr8jsry" "id": "iDKQbVr8jsry"
}, },
"source": [ "source": [
"**Stream imitation example**" "### Stream imitation example"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": { "metadata": {
"id": "q-lql_2Wjsry" "id": "q-lql_2Wjsry"
}, },
"outputs": [],
"source": [ "source": [
"## using VADIterator class\n", "## using VADIterator class\n",
"\n", "\n",
"vad_iterator = VADiterator(double_model)\n", "vad_iterator = VADIterator(model)\n",
"wav = read_audio((f'{files_dir}/en.wav', sampling_rate=SAMPLE_RATE)\n", "wav = read_audio(f'{files_dir}/en.wav', sampling_rate=SAMPLE_RATE)\n",
"\n", "\n",
"window_size_samples = 1536 # number of samples in a single audio chunk\n", "window_size_samples = 1536 # number of samples in a single audio chunk\n",
"for i in range(0, len(wav), window_size_samples):\n", "for i in range(0, len(wav), window_size_samples):\n",
@@ -174,19 +138,19 @@
" if speech_dict:\n", " if speech_dict:\n",
" print(speech_dict, end=' ')\n", " print(speech_dict, end=' ')\n",
"vad_iterator.reset_states() # reset model states after each audio" "vad_iterator.reset_states() # reset model states after each audio"
], ]
"execution_count": null,
"outputs": []
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": { "metadata": {
"id": "BX3UgwwB2Fwv" "id": "BX3UgwwB2Fwv"
}, },
"outputs": [],
"source": [ "source": [
"## just probabilities\n", "## just probabilities\n",
"\n", "\n",
"wav = read_audio((f'{files_dir}/en.wav', sampling_rate=SAMPLE_RATE)\n", "wav = read_audio(f'{files_dir}/en.wav', sampling_rate=SAMPLE_RATE)\n",
"speech_probs = []\n", "speech_probs = []\n",
"window_size_samples = 1536\n", "window_size_samples = 1536\n",
"for i in range(0, len(wav), window_size_samples):\n", "for i in range(0, len(wav), window_size_samples):\n",
@@ -194,9 +158,7 @@
" speech_probs.append(speech_prob)\n", " speech_probs.append(speech_prob)\n",
"\n", "\n",
"pprint(speech_probs[:100])" "pprint(speech_probs[:100])"
], ]
"execution_count": null,
"outputs": []
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
@@ -221,10 +183,12 @@
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "hidden": true,
 "id": "Kq5gQuYq2Fwx"
 },
+"outputs": [],
 "source": [
 "#@title Install and Import Dependencies\n",
 "\n",
@@ -249,9 +213,7 @@
" drop_chunks) = utils\n", " drop_chunks) = utils\n",
"\n", "\n",
"files_dir = torch.hub.get_dir() + '/snakers4_silero-vad_master/files'" "files_dir = torch.hub.get_dir() + '/snakers4_silero-vad_master/files'"
], ]
"execution_count": null,
"outputs": []
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
@@ -266,64 +228,64 @@
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "hidden": true,
 "id": "EXpau6xq2Fwy"
 },
+"outputs": [],
 "source": [
 "wav = read_audio(f'{files_dir}/en_num.wav')\n",
 "# get number timestamps from full audio file\n",
 "number_timestamps = get_number_ts(wav, model)\n",
 "pprint(number_timestamps)"
-],
-"execution_count": null,
-"outputs": []
+]
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "hidden": true,
 "id": "u-KfXRhZ2Fwy"
 },
+"outputs": [],
 "source": [
 "sample_rate = 16000\n",
 "# convert ms in timestamps to samples\n",
 "for timestamp in number_timestamps:\n",
 " timestamp['start'] = int(timestamp['start'] * sample_rate / 1000)\n",
 " timestamp['end'] = int(timestamp['end'] * sample_rate / 1000)"
-],
-"execution_count": null,
-"outputs": []
+]
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "hidden": true,
 "id": "iwYEC4aZ2Fwy"
 },
+"outputs": [],
 "source": [
 "# merge all number chunks to one audio\n",
 "save_audio('only_numbers.wav',\n",
 " collect_chunks(number_timestamps, wav), sample_rate) \n",
 "Audio('only_numbers.wav')"
-],
-"execution_count": null,
-"outputs": []
+]
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "hidden": true,
 "id": "fHaYejX12Fwy"
 },
+"outputs": [],
 "source": [
 "# drop all number chunks from audio\n",
 "save_audio('no_numbers.wav',\n",
 " drop_chunks(number_timestamps, wav), sample_rate) \n",
 "Audio('no_numbers.wav')"
-],
-"execution_count": null,
-"outputs": []
+]
 },
 {
 "cell_type": "markdown",
@@ -348,10 +310,12 @@
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "hidden": true,
 "id": "Zu9D0t6n2Fwz"
 },
+"outputs": [],
 "source": [
 "#@title Install and Import Dependencies\n",
 "\n",
@@ -373,9 +337,7 @@
" read_audio) = utils\n", " read_audio) = utils\n",
"\n", "\n",
"files_dir = torch.hub.get_dir() + '/snakers4_silero-vad_master/files'" "files_dir = torch.hub.get_dir() + '/snakers4_silero-vad_master/files'"
], ]
"execution_count": null,
"outputs": []
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
@@ -390,17 +352,17 @@
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "hidden": true,
 "id": "c8UYnYBF2Fw0"
 },
+"outputs": [],
 "source": [
 "wav = read_audio(f'{files_dir}/en.wav')\n",
 "lang = get_language(wav, model)\n",
 "print(lang)"
-],
-"execution_count": null,
-"outputs": []
+]
 },
 {
 "cell_type": "markdown",
@@ -452,11 +414,13 @@
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "cellView": "form",
 "hidden": true,
 "id": "PdjGd56R2Fw5"
 },
+"outputs": [],
 "source": [
 "#@title Install and Import Dependencies\n",
 "\n",
@@ -491,9 +455,7 @@
" outs = model.run(None, ort_inputs)\n", " outs = model.run(None, ort_inputs)\n",
" outs = [torch.Tensor(x) for x in outs]\n", " outs = [torch.Tensor(x) for x in outs]\n",
" return outs" " return outs"
], ]
"execution_count": null,
"outputs": []
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
@@ -508,10 +470,12 @@
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "hidden": true,
 "id": "_r6QZiwu2Fw5"
 },
+"outputs": [],
 "source": [
 "model = init_onnx_model(f'{files_dir}/number_detector.onnx')\n",
 "wav = read_audio(f'{files_dir}/en_num.wav')\n",
@@ -519,55 +483,53 @@
"# get number timestamps from full audio file\n", "# get number timestamps from full audio file\n",
"number_timestamps = get_number_ts(wav, model, run_function=validate_onnx)\n", "number_timestamps = get_number_ts(wav, model, run_function=validate_onnx)\n",
"pprint(number_timestamps)" "pprint(number_timestamps)"
], ]
"execution_count": null,
"outputs": []
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": { "metadata": {
"hidden": true, "hidden": true,
"id": "FN4aDwLV2Fw5" "id": "FN4aDwLV2Fw5"
}, },
"outputs": [],
"source": [ "source": [
"sample_rate = 16000\n", "sample_rate = 16000\n",
"# convert ms in timestamps to samples\n", "# convert ms in timestamps to samples\n",
"for timestamp in number_timestamps:\n", "for timestamp in number_timestamps:\n",
" timestamp['start'] = int(timestamp['start'] * sample_rate / 1000)\n", " timestamp['start'] = int(timestamp['start'] * sample_rate / 1000)\n",
" timestamp['end'] = int(timestamp['end'] * sample_rate / 1000)" " timestamp['end'] = int(timestamp['end'] * sample_rate / 1000)"
], ]
"execution_count": null,
"outputs": []
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": { "metadata": {
"hidden": true, "hidden": true,
"id": "JnvS6WTK2Fw5" "id": "JnvS6WTK2Fw5"
}, },
"outputs": [],
"source": [ "source": [
"# merge all number chunks to one audio\n", "# merge all number chunks to one audio\n",
"save_audio('only_numbers.wav',\n", "save_audio('only_numbers.wav',\n",
" collect_chunks(number_timestamps, wav), 16000) \n", " collect_chunks(number_timestamps, wav), 16000) \n",
"Audio('only_numbers.wav')" "Audio('only_numbers.wav')"
], ]
"execution_count": null,
"outputs": []
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null,
"metadata": { "metadata": {
"hidden": true, "hidden": true,
"id": "yUxOcOFG2Fw6" "id": "yUxOcOFG2Fw6"
}, },
"outputs": [],
"source": [ "source": [
"# drop all number chunks from audio\n", "# drop all number chunks from audio\n",
"save_audio('no_numbers.wav',\n", "save_audio('no_numbers.wav',\n",
" drop_chunks(number_timestamps, wav), 16000) \n", " drop_chunks(number_timestamps, wav), 16000) \n",
"Audio('no_numbers.wav')" "Audio('no_numbers.wav')"
], ]
"execution_count": null,
"outputs": []
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
@@ -592,11 +554,13 @@
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "cellView": "form",
 "hidden": true,
 "id": "iNkDWJ3H2Fw6"
 },
+"outputs": [],
 "source": [
 "#@title Install and Import Dependencies\n",
 "\n",
@@ -628,9 +592,7 @@
" outs = model.run(None, ort_inputs)\n", " outs = model.run(None, ort_inputs)\n",
" outs = [torch.Tensor(x) for x in outs]\n", " outs = [torch.Tensor(x) for x in outs]\n",
" return outs" " return outs"
], ]
"execution_count": null,
"outputs": []
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
@@ -644,19 +606,57 @@
 },
 {
 "cell_type": "code",
+"execution_count": null,
 "metadata": {
 "hidden": true,
 "id": "WHXnh9IV2Fw6"
 },
+"outputs": [],
 "source": [
 "model = init_onnx_model(f'{files_dir}/number_detector.onnx')\n",
 "wav = read_audio(f'{files_dir}/en.wav')\n",
 "\n",
 "lang = get_language(wav, model, run_function=validate_onnx)\n",
 "print(lang)"
-],
-"execution_count": null,
-"outputs": []
-}
 ]
+}
+],
+"metadata": {
+"colab": {
+"name": "silero-vad.ipynb",
+"provenance": []
+},
+"kernelspec": {
+"display_name": "Python 3",
+"language": "python",
+"name": "python3"
+},
+"language_info": {
+"codemirror_mode": {
+"name": "ipython",
+"version": 3
+},
+"file_extension": ".py",
+"mimetype": "text/x-python",
+"name": "python",
+"nbconvert_exporter": "python",
+"pygments_lexer": "ipython3",
+"version": "3.8.8"
+},
+"toc": {
+"base_numbering": 1,
+"nav_menu": {},
+"number_sections": true,
+"sideBar": true,
+"skip_h1_title": false,
+"title_cell": "Table of Contents",
+"title_sidebar": "Contents",
+"toc_cell": false,
+"toc_position": {},
+"toc_section_display": true,
+"toc_window_display": false
+}
+},
+"nbformat": 4,
+"nbformat_minor": 0
 }

View File

@@ -20,7 +20,6 @@ def validate(model,
 def read_audio(path: str,
 sampling_rate: int = 16000):
-assert torchaudio.get_audio_backend() == 'soundfile'
 wav, sr = torchaudio.load(path)
 if wav.size(0) > 1:
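
For reference, the stream-imitation cell that this commit repairs can be run on its own roughly as follows. This is a minimal sketch, not code taken verbatim from the diff: the torch.hub entrypoint name ('silero_vad'), the unpacking order of utils, the chunk slicing inside the loop, and the local file 'en.wav' are assumptions based on the silero-vad README rather than on this commit.

import torch

SAMPLE_RATE = 16000

# Assumed hub entrypoint; the diff itself only shows torch.hub.get_dir() being used.
model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad', model='silero_vad')
(get_speech_timestamps, save_audio, read_audio,
 VADIterator, collect_chunks) = utils  # assumed unpacking order

wav = read_audio('en.wav', sampling_rate=SAMPLE_RATE)  # assumed local 16 kHz file

# Streaming usage as in the corrected cell: VADIterator wraps the loaded model
# and the audio is fed in fixed-size chunks.
vad_iterator = VADIterator(model)
window_size_samples = 1536  # number of samples in a single audio chunk
for i in range(0, len(wav), window_size_samples):
    speech_dict = vad_iterator(wav[i: i + window_size_samples])
    if speech_dict:
        print(speech_dict, end=' ')
vad_iterator.reset_states()  # reset model states after each audio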