diff --git a/README.md b/README.md
index eb03f7a..0c9ef5c 100644
--- a/README.md
+++ b/README.md
@@ -322,6 +322,7 @@ Since our VAD (only VAD, other networks are more flexible) was trained on chunks
 - `neg_trig_sum` - same as `trig_sum`, but for switching from triggered to non-triggered state (non-speech)
 - `num_steps` - nubmer of overlapping windows to split audio chunk into (we recommend 4 or 8)
 - `num_samples_per_window` - number of samples in each window, our models were trained using `4000` samples (250 ms) per window, so this is preferable value (lesser values reduce [quality](https://github.com/snakers4/silero-vad/issues/2#issuecomment-750840434));
+- `min_speech_samples` - minimum speech chunk duration in samples
 
 ### How VAD Works
 
diff --git a/utils.py b/utils.py
index bff6a3d..5fafca7 100644
--- a/utils.py
+++ b/utils.py
@@ -59,6 +59,7 @@ def get_speech_ts(wav: torch.Tensor,
                   num_steps: int = 8,
                   batch_size: int = 200,
                   num_samples_per_window: int = 4000,
+                  min_speech_samples: int = 10000, #samples
                   run_function=validate):
 
     num_samples = num_samples_per_window
@@ -97,7 +98,7 @@ def get_speech_ts(wav: torch.Tensor,
             current_speech['start'] = step * max(0, i-num_steps)
         if ((sum(buffer) / len(buffer)) < neg_trig_sum) and triggered:
             current_speech['end'] = step * i
-            if (current_speech['end'] - current_speech['start']) > 10000:
+            if (current_speech['end'] - current_speech['start']) > min_speech_samples:
                 speeches.append(current_speech)
             current_speech = {}
             triggered = False
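
For context, a minimal usage sketch of the new `min_speech_samples` knob (not part of the patch itself): it assumes the model is fetched via `torch.hub` as the README describes, that `utils.py` is importable from the repo root, and that `example.wav` is a hypothetical 16 kHz mono file; the `get_speech_ts(wav, model, ...)` argument order is taken from the repo's examples.

```python
import torch
import torchaudio

from utils import get_speech_ts  # utils.py from this repo

# Load the VAD model as shown in the README.
model, _ = torch.hub.load(repo_or_dir='snakers4/silero-vad',
                          model='silero_vad')

# 'example.wav' is a placeholder for a 16 kHz mono recording.
wav, sr = torchaudio.load('example.wav')
wav = wav.squeeze(0)  # get_speech_ts expects a 1-D tensor

# Discard detected speech chunks shorter than 10000 samples (~0.6 s at 16 kHz);
# raise the value to drop short bursts, lower it to keep them.
speech_timestamps = get_speech_ts(wav, model,
                                  num_steps=4,
                                  num_samples_per_window=4000,
                                  min_speech_samples=10000)
print(speech_timestamps)  # list of {'start': ..., 'end': ...} dicts in samples
```

The default of `10000` samples simply preserves the previously hard-coded threshold, so existing callers see no behavior change; the patch only makes the minimum chunk length configurable.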