{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Jit example" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!pip install -q ipython # For jupyter audio display" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:54.623434Z", "start_time": "2020-12-15T13:09:54.241855Z" } }, "outputs": [], "source": [ "# dependencies\n", "import glob\n", "import torch\n", "torch.set_num_threads(1)\n", "from IPython.display import Audio\n", "\n", "model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',\n", " model='silero_vad')\n", "\n", "\n", "(get_speech_ts,\n", " save_audio,\n", " read_audio,\n", " state_generator,\n", " single_audio_stream,\n", " collect_speeches) = utils" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Full audio" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:56.879818Z", "start_time": "2020-12-15T13:09:56.864765Z" } }, "outputs": [], "source": [ "wav = read_audio('files/en.wav')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:58.876034Z", "start_time": "2020-12-15T13:09:57.139254Z" } }, "outputs": [], "source": [ "speech_timestamps = get_speech_ts(wav, model, num_steps=4) # get speech timestamps from full audio file" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:58.885802Z", "start_time": "2020-12-15T13:09:58.877327Z" } }, "outputs": [], "source": [ "speech_timestamps" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:58.941063Z", "start_time": "2020-12-15T13:09:58.887006Z" } }, "outputs": [], "source": [ "save_audio('only_speech.wav', collect_speeches(speech_timestamps, wav), 16000) # merge all speech chunks to one audio\n", 
"Audio('only_speech.wav')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Single audio stream" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:59.199321Z", "start_time": "2020-12-15T13:09:59.196823Z" } }, "outputs": [], "source": [ "wav = 'files/en.wav'" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:10:03.585644Z", "start_time": "2020-12-15T13:09:59.429757Z" } }, "outputs": [], "source": [ "for batch in single_audio_stream(model, wav):\n", " if batch:\n", " print(batch)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Multiple audio stream" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:10:03.590358Z", "start_time": "2020-12-15T13:10:03.587071Z" } }, "outputs": [], "source": [ "audios_for_stream = glob.glob('files/*.wav')\n", "len(audios_for_stream) # total 4 audios" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:10:15.762491Z", "start_time": "2020-12-15T13:10:03.591388Z" } }, "outputs": [], "source": [ "for batch in state_generator(model, audios_for_stream, audios_in_stream=2): # 2 audio stream\n", " if batch:\n", " print(batch)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Onnx example" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!pip install -q ipython # For jupyter audio display\n", "!pip install -q onnxruntime" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:05.932256Z", "start_time": "2020-12-15T13:09:05.043659Z" } }, "outputs": [], "source": [ "# dependencies\n", "import glob\n", "import torch\n", "from IPython.display import Audio\n", "torch.set_num_threads(1)\n", "import onnxruntime\n", "\n", "from utils import (get_speech_ts, save_audio, read_audio, \n",
"                   state_generator, single_audio_stream, collect_speeches)\n", "\n", "def init_onnx_model(model_path: str):\n", " return onnxruntime.InferenceSession(model_path)\n", "\n", "def validate_onnx(model, inputs):\n", " with torch.no_grad():\n", " ort_inputs = {'input': inputs.cpu().numpy()}\n", " outs = model.run(None, ort_inputs)\n", " outs = [torch.Tensor(x) for x in outs]\n", " return outs" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Full audio" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:06.643812Z", "start_time": "2020-12-15T13:09:06.473386Z" } }, "outputs": [], "source": [ "model = init_onnx_model('files/model.onnx')\n", "wav = read_audio('files/en.wav')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:08.094414Z", "start_time": "2020-12-15T13:09:07.073253Z" } }, "outputs": [], "source": [ "speech_timestamps = get_speech_ts(wav, model, num_steps=4, run_function=validate_onnx) # get speech timestamps from full audio file" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:08.107584Z", "start_time": "2020-12-15T13:09:08.096550Z" } }, "outputs": [], "source": [ "speech_timestamps" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:08.862421Z", "start_time": "2020-12-15T13:09:08.820014Z" } }, "outputs": [], "source": [ "save_audio('only_speech.wav', collect_speeches(speech_timestamps, wav), 16000) # merge all speech chunks to one audio\n", "Audio('only_speech.wav')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Single audio stream" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:09.606031Z", "start_time": "2020-12-15T13:09:09.504239Z" } }, "outputs": [], "source": [ "model = init_onnx_model('files/model.onnx')\n",
"wav = 'files/en.wav'" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:11.453171Z", "start_time": "2020-12-15T13:09:09.633435Z" } }, "outputs": [], "source": [ "for batch in single_audio_stream(model, wav, run_function=validate_onnx):\n", " if batch:\n", " print(batch)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Multiple audio stream" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:11.540423Z", "start_time": "2020-12-15T13:09:11.455706Z" } }, "outputs": [], "source": [ "model = init_onnx_model('files/model.onnx')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:11.550815Z", "start_time": "2020-12-15T13:09:11.542954Z" } }, "outputs": [], "source": [ "audios_for_stream = glob.glob('files/*.wav')\n", "len(audios_for_stream) # total 4 audios" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-12-15T13:09:19.565434Z", "start_time": "2020-12-15T13:09:11.552097Z" } }, "outputs": [], "source": [ "for batch in state_generator(model, audios_for_stream, audios_in_stream=2, run_function=validate_onnx): # 2 audio stream\n", " if batch:\n", " print(batch)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.3" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": false, "toc_position": {}, "toc_section_display": true, "toc_window_display": false } }, "nbformat": 4,
"nbformat_minor": 4 }