Merge pull request #7 from snakers4/adamnsandle

Adamnsandle
Alexander Veysov
2020-12-24 13:42:14 +03:00
committed by GitHub
2 changed files with 22 additions and 12 deletions


@@ -66,7 +66,8 @@ Currently we provide the following functionality:
| Version | Date | Comment |
|---------|-------------|---------------------------------------------------|
| `v1` | 2020-12-15 | Initial release |
-| `v2` | coming soon | Add Number Detector or Language Classifier heads, lift 250 ms chunk VAD limitation |
+| `v1.1` | 2020-12-24 | Better VAD models, compatible with chunks shorter than 250 ms |
+| `v2` | coming soon | Add Number Detector and Language Classifier heads |
### PyTorch
@@ -164,8 +165,6 @@ So **batch size** for streaming is **num_steps * number of audio streams**. Time
| **120** | 96 | 85 |
| **200** | 157 | 137 |
-We are working on lifting this 250 ms constraint.
#### Full Audio Throughput
**RTS** (seconds of audio processed per second, real time speed, or 1 / RTF) for full audio processing depends on **num_steps** (see previous paragraph) and **batch size** (bigger is better).
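For intuition, RTS can be estimated with a plain timer around any full-audio call; this `measure_rts` helper is a hypothetical illustration, not part of the repo:

```python
import time

# hypothetical helper illustrating RTS = 1 / RTF (not part of the repo)
def measure_rts(process, wav_seconds: float) -> float:
    start = time.time()
    process()  # e.g. a full-audio get_speech_ts call
    elapsed = time.time() - start
    rtf = elapsed / wav_seconds  # real-time factor: compute time / audio length
    return 1.0 / rtf             # real-time speed: audio seconds per second
```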
@@ -193,6 +192,12 @@ Since our VAD (only VAD, other networks are more flexible) was trained on chunks
## FAQ
+### Method arguments to use for VAD quality/speed tuning
+- `trig_sum` - overlapping windows are used for each audio chunk; `trig_sum` defines the average probability among those windows required to switch into the triggered (speech) state
+- `neg_trig_sum` - same as `trig_sum`, but for switching from the triggered to the non-triggered state (no speech)
+- `num_steps` - number of overlapping windows to split each audio chunk into (we recommend 4 or 8)
+- `num_samples_per_window` - number of samples in each window; our models were trained using `4000` samples (250 ms) per window, so this value is preferable (smaller values reduce quality); a usage sketch follows this file's diff
### How VAD Works
- Audio is split into 250 ms chunks;
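Putting the tuning arguments above together, a hedged usage sketch: the `torch.hub` entry point and the order of `utils` follow the repo's README and are assumptions here (they are not part of this diff), and 16 kHz mono audio is assumed.

```python
import torch

# entry point as documented in the repo's README (assumed, not shown in this diff)
model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',
                              model='silero_vad')
get_speech_ts, save_audio, read_audio, *_ = utils  # order assumed from the README

wav = read_audio('test.wav')  # 16 kHz mono assumed

# stricter speech trigger plus shorter 100 ms windows (1600 samples at 16 kHz);
# num_samples_per_window % num_steps == 0 must hold for the assert in get_speech_ts
speech_timestamps = get_speech_ts(wav, model,
                                  trig_sum=0.30,
                                  neg_trig_sum=0.05,
                                  num_steps=8,
                                  num_samples_per_window=1600)
print(speech_timestamps)
```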


@@ -55,9 +55,10 @@ def get_speech_ts(wav: torch.Tensor,
                  neg_trig_sum: float = 0.07,
                  num_steps: int = 8,
                  batch_size: int = 200,
+                  num_samples_per_window: int = 4000,
                  run_function=validate):
-    num_samples = 4000
+    num_samples = num_samples_per_window
    assert num_samples % num_steps == 0
    step = int(num_samples / num_steps)  # stride / hop
    outs = []
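The `assert` above is the contract between the two knobs: the window must split evenly into `num_steps` hops. A standalone sketch of that stride arithmetic with the trained defaults:

```python
# standalone illustration of the stride arithmetic in get_speech_ts
num_samples_per_window = 4000               # 250 ms at 16 kHz (trained default)
num_steps = 8
assert num_samples_per_window % num_steps == 0
step = num_samples_per_window // num_steps  # hop of 500 samples (31.25 ms)

# start offsets of the overlapping windows within one chunk
offsets = [i * step for i in range(num_steps)]
print(offsets)  # [0, 500, 1000, 1500, 2000, 2500, 3000, 3500]
```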
@@ -108,8 +109,9 @@ class VADiterator:
    def __init__(self,
                 trig_sum: float = 0.26,
                 neg_trig_sum: float = 0.07,
-                 num_steps: int = 8):
-        self.num_samples = 4000
+                 num_steps: int = 8,
+                 num_samples_per_window: int = 4000):
+        self.num_samples = num_samples_per_window
        self.num_steps = num_steps
        assert self.num_samples % num_steps == 0
        self.step = int(self.num_samples / num_steps)  # 500 samples is good enough
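With the new parameter, an iterator over 100 ms windows could be constructed as below (values are illustrative; only the signature above is taken from the diff):

```python
# 1600 samples = 100 ms at 16 kHz; 1600 % 8 == 0, so the assert above holds
# and self.step becomes 200 samples (a 12.5 ms hop)
vad_iter = VADiterator(trig_sum=0.26,
                       neg_trig_sum=0.07,
                       num_steps=8,
                       num_samples_per_window=1600)
```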
@@ -170,10 +172,11 @@ def state_generator(model,
                    trig_sum: float = 0.26,
                    neg_trig_sum: float = 0.07,
                    num_steps: int = 8,
+                    num_samples_per_window: int = 4000,
                    audios_in_stream: int = 2,
                    run_function=validate):
-    VADiters = [VADiterator(trig_sum, neg_trig_sum, num_steps) for i in range(audios_in_stream)]
-    for i, current_pieces in enumerate(stream_imitator(audios, audios_in_stream)):
+    VADiters = [VADiterator(trig_sum, neg_trig_sum, num_steps, num_samples_per_window) for i in range(audios_in_stream)]
+    for i, current_pieces in enumerate(stream_imitator(audios, audios_in_stream, num_samples_per_window)):
        for_batch = [x.prepare_batch(*y) for x, y in zip(VADiters, current_pieces)]
        batch = torch.cat(for_batch)
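A possible driver loop for the updated signature; that `state_generator` takes an `audios` list and yields per-chunk states is inferred from its body above, so treat this as a sketch rather than documented usage:

```python
# hypothetical driver loop; `audios` and the yielded states are inferred
# from the function body above, not from a documented API
for states in state_generator(model,
                              audios=['first.wav', 'second.wav'],
                              num_samples_per_window=1600,
                              audios_in_stream=2):
    print(states)
```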
@@ -189,10 +192,11 @@ def state_generator(model,
def stream_imitator(audios: List[str],
-                    audios_in_stream: int):
+                    audios_in_stream: int,
+                    num_samples_per_window: int = 4000):
    audio_iter = iter(audios)
    iterators = []
-    num_samples = 4000
+    num_samples = num_samples_per_window
    # initial wavs
    for i in range(audios_in_stream):
        next_wav = next(audio_iter)
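For intuition, a simplified standalone version of what a stream imitator does: interleave window-sized chunks from several loaded wavs (the real function above also refills streams from `audio_iter` as files run out):

```python
from typing import Iterator, List

import torch

# simplified standalone sketch, not the repo's implementation
def toy_stream_imitator(wavs: List[torch.Tensor],
                        num_samples_per_window: int = 4000) -> Iterator[list]:
    chunk_iters = [iter(w.split(num_samples_per_window)) for w in wavs]
    while True:
        pieces = []
        for it in chunk_iters:
            chunk = next(it, None)
            if chunk is None:
                return  # stop when a stream runs dry (real code pulls a new file)
            pieces.append(chunk)
        yield pieces  # one window-sized piece per simulated stream
```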
@@ -229,9 +233,10 @@ def single_audio_stream(model,
                        trig_sum: float = 0.26,
                        neg_trig_sum: float = 0.07,
                        num_steps: int = 8,
+                        num_samples_per_window: int = 4000,
                        run_function=validate):
-    num_samples = 4000
-    VADiter = VADiterator(trig_sum, neg_trig_sum, num_steps)
+    num_samples = num_samples_per_window
+    VADiter = VADiterator(trig_sum, neg_trig_sum, num_steps, num_samples_per_window)
    wav = read_audio(audio)
    wav_chunks = iter([wav[i:i+num_samples] for i in range(0, len(wav), num_samples)])
    for chunk in wav_chunks:
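And a possible single-stream loop matching the chunking above; the `audio` path argument and the yielded states are inferred from the function body, so this too is a sketch:

```python
# hypothetical streaming loop; `audio` is inferred from read_audio(audio) above
for states in single_audio_stream(model, 'test.wav',
                                  num_samples_per_window=1600):
    if states:  # non-empty when the VAD reports a state change for this chunk
        print(states)
```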