diff --git a/examples/microphone_and_webRTC_integration/microphone_and_webRTC_integration.py b/examples/microphone_and_webRTC_integration/microphone_and_webRTC_integration.py
index 2474657..74e56ef 100644
--- a/examples/microphone_and_webRTC_integration/microphone_and_webRTC_integration.py
+++ b/examples/microphone_and_webRTC_integration/microphone_and_webRTC_integration.py
@@ -186,7 +186,7 @@ if __name__ == '__main__':
                         help="same as trig_sum, but for switching from triggered to non-triggered state (non-speech)")
     parser.add_argument('-N', '--num_steps', type=int, default=8,
-                        help="nubmer of overlapping windows to split audio chunk into (we recommend 4 or 8)")
+                        help="number of overlapping windows to split audio chunk into (we recommend 4 or 8)")
     parser.add_argument('-nspw', '--num_samples_per_window', type=int, default=4000,
                         help="number of samples in each window, our models were trained using 4000 samples (250 ms) per window, so this is preferable value (lesser values reduce quality)")
@@ -198,4 +198,4 @@ if __name__ == '__main__':
                         help=" minimum silence duration in samples between to separate speech chunks")
     ARGS = parser.parse_args()
     ARGS.rate=DEFAULT_SAMPLE_RATE
-    main(ARGS)
\ No newline at end of file
+    main(ARGS)
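
For context on the `--num_steps` and `--num_samples_per_window` flags whose help strings this diff touches: a minimal sketch of how an audio chunk could be split into overlapping windows under the default values. The `split_into_overlapping_windows` helper and its hop-size arithmetic are illustrative assumptions, not the repository's actual implementation.

```python
import numpy as np

def split_into_overlapping_windows(chunk, num_steps=8, num_samples_per_window=4000):
    """Split a 1-D audio chunk into overlapping windows.

    Illustrative only: mirrors the semantics suggested by the
    --num_steps / --num_samples_per_window flags, not the example
    script's actual code.
    """
    # Hop size: each window starts num_samples_per_window / num_steps
    # samples after the previous one, so consecutive windows overlap.
    step = num_samples_per_window // num_steps
    windows = []
    for start in range(0, len(chunk) - num_samples_per_window + 1, step):
        windows.append(chunk[start:start + num_samples_per_window])
    if not windows:
        return np.empty((0, num_samples_per_window), dtype=chunk.dtype)
    return np.stack(windows)

# Example: with the defaults (8 steps, 4000 samples ~ 250 ms at 16 kHz),
# a 16000-sample chunk yields 25 overlapping windows of 4000 samples each.
chunk = np.zeros(16000, dtype=np.float32)
print(split_into_overlapping_windows(chunk).shape)  # (25, 4000)
```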