ReplyOnPause and ReplyOnStopWords can be interrupted (#119)

* Add all this code

* add code

* Fix demo

---------

Co-authored-by: Freddy Boulton <freddyboulton@hf-freddy.local>
Author: Freddy Boulton
Date: 2025-03-03 21:47:16 -05:00 (committed by GitHub)
Parent: 87954a62aa
Commit: 6ea54777af
13 changed files with 155 additions and 40 deletions
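
With this change, handlers built on ReplyOnPause and ReplyOnStopWords no longer have to play their reply to the end: when the user starts speaking again, fastrtc can stop consuming the handler's generator and hand it the new audio instead. A minimal sketch of such a handler (not code from this commit; the echo handler and its chunking are illustrative, and the switch that enables interruption lives inside the handler classes rather than in user code, so it is not shown here):

import numpy as np
from fastrtc import ReplyOnPause, Stream


def echo(audio: tuple[int, np.ndarray]):
    sr, samples = audio
    # Yield the reply in small chunks; with interruption support, fastrtc can
    # stop pulling from this generator as soon as new speech is detected.
    for chunk in np.array_split(samples, 10, axis=-1):
        yield (sr, chunk.reshape(1, -1))


stream = Stream(ReplyOnPause(echo), modality="audio", mode="send-receive")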


@@ -41,7 +41,7 @@ def response(
     response_text = (
         groq_client.chat.completions.create(
             model="llama-3.1-8b-instant",
-            max_tokens=512,
+            max_tokens=200,
             messages=messages,  # type: ignore
         )
         .choices[0]
@@ -49,6 +49,7 @@ def response(
     )
     chatbot.append({"role": "assistant", "content": response_text})
+    yield AdditionalOutputs(chatbot)
     for chunk in tts_client.text_to_speech.convert_as_stream(
         text=response_text,  # type: ignore
@@ -58,7 +59,6 @@ def response(
     ):
         audio_array = np.frombuffer(chunk, dtype=np.int16).reshape(1, -1)
         yield (24000, audio_array)
-    yield AdditionalOutputs(chatbot)


 chatbot = gr.Chatbot(type="messages")
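
The hunks above reorder the demo's yields: the updated chatbot is sent as an AdditionalOutputs before the ElevenLabs audio loop rather than after it, so the text reply still reaches the UI when playback is interrupted mid-stream (max_tokens is also lowered from 512 to 200). A rough sketch of the resulting yield order, with the Groq and TTS calls replaced by stubs (synthesize is a hypothetical stand-in, not the ElevenLabs client):

import numpy as np
from fastrtc import AdditionalOutputs


def synthesize(text: str):
    """Stand-in for a streaming TTS client; yields raw int16 PCM bytes."""
    yield np.zeros(2400, dtype=np.int16).tobytes()


def response(audio: tuple[int, np.ndarray], chatbot: list[dict] | None = None):
    chatbot = chatbot or []
    reply_text = "..."  # LLM call elided (Groq in the demo)
    chatbot.append({"role": "assistant", "content": reply_text})
    # Emit the UI update first: it is delivered even if the audio below
    # is cut off part-way through by the user speaking again.
    yield AdditionalOutputs(chatbot)
    for chunk in synthesize(reply_text):
        yield (24000, np.frombuffer(chunk, dtype=np.int16).reshape(1, -1))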


@@ -3,6 +3,7 @@ from typing import Generator, Literal

 import gradio as gr
 import numpy as np
+from dotenv import load_dotenv
 from fastrtc import (
     AdditionalOutputs,
     ReplyOnPause,
@@ -13,6 +14,8 @@ from fastrtc import (
 from moonshine_onnx import MoonshineOnnxModel, load_tokenizer
 from numpy.typing import NDArray

+load_dotenv()
+

 @lru_cache(maxsize=None)
 def load_moonshine(
@@ -27,6 +30,7 @@ tokenizer = load_tokenizer()
 def stt(
     audio: tuple[int, NDArray[np.int16 | np.float32]],
     model_name: Literal["moonshine/base", "moonshine/tiny"],
+    captions: str,
 ) -> Generator[AdditionalOutputs, None, None]:
     moonshine = load_moonshine(model_name)
     sr, audio_np = audio  # type: ignore
@@ -35,9 +39,12 @@ def stt(
     if audio_np.ndim == 1:
         audio_np = audio_np.reshape(1, -1)
     tokens = moonshine.generate(audio_np)
-    yield AdditionalOutputs(tokenizer.decode_batch(tokens)[0])
+    yield AdditionalOutputs(
+        (captions + "\n" + tokenizer.decode_batch(tokens)[0]).strip()
+    )


+captions = gr.Textbox(label="Captions")
 stream = Stream(
     ReplyOnPause(stt, input_sample_rate=16000),
     modality="audio",
@@ -55,9 +62,10 @@ stream = Stream(
             choices=["moonshine/base", "moonshine/tiny"],
             value="moonshine/base",
             label="Model",
-        )
+        ),
+        captions,
     ],
-    additional_outputs=[gr.Textbox(label="Captions")],
+    additional_outputs=[captions],
     additional_outputs_handler=lambda prev, current: (prev + "\n" + current).strip(),
 )
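
The captions demo now threads the transcript through the handler itself: the same gr.Textbox is registered as both an additional input and an additional output, so each call to stt receives the text accumulated so far and yields it back with the new line appended. A stripped-down sketch of that round trip (the STT call and the model dropdown are stubbed out, mode/RTC options from the demo are omitted, and the demo's own additional_outputs_handler re-joins prev and current whereas this sketch simply keeps the latest value):

import gradio as gr
import numpy as np
from fastrtc import AdditionalOutputs, ReplyOnPause, Stream

captions = gr.Textbox(label="Captions")


def stt(audio: tuple[int, np.ndarray], captions_text: str):
    new_line = "..."  # speech-to-text call elided (Moonshine in the demo)
    # Yield the running transcript; because the same component is also an
    # additional input, this value comes back as `captions_text` next turn.
    yield AdditionalOutputs((captions_text + "\n" + new_line).strip())


stream = Stream(
    ReplyOnPause(stt, input_sample_rate=16000),
    modality="audio",
    additional_inputs=[captions],
    additional_outputs=[captions],
    additional_outputs_handler=lambda prev, current: current,
)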


@@ -15,6 +15,7 @@ from fastrtc import (
 )
 from gradio.utils import get_space
 from groq import AsyncClient
+from pydantic import BaseModel

 cur_dir = Path(__file__).parent
@@ -24,23 +25,23 @@ load_dotenv()
 groq_client = AsyncClient()


-async def transcribe(audio: tuple[int, np.ndarray]):
-    transcript = await groq_client.audio.transcriptions.create(
+async def transcribe(audio: tuple[int, np.ndarray], transcript: str):
+    response = await groq_client.audio.transcriptions.create(
         file=("audio-file.mp3", audio_to_bytes(audio)),
         model="whisper-large-v3-turbo",
         response_format="verbose_json",
     )
-    yield AdditionalOutputs(transcript.text)
+    yield AdditionalOutputs(transcript + "\n" + response.text)


+transcript = gr.Textbox(label="Transcript")
 stream = Stream(
     ReplyOnPause(transcribe),
     modality="audio",
     mode="send",
-    additional_outputs=[
-        gr.Textbox(label="Transcript"),
-    ],
-    additional_outputs_handler=lambda a, b: a + " " + b,
+    additional_inputs=[transcript],
+    additional_outputs=[transcript],
+    additional_outputs_handler=lambda a, b: b,
     rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
     concurrency_limit=5 if get_space() else None,
     time_limit=90 if get_space() else None,
@@ -51,11 +52,21 @@ app = FastAPI()
 stream.mount(app)


+class SendInput(BaseModel):
+    webrtc_id: str
+    transcript: str
+
+
+@app.post("/send_input")
+def send_input(body: SendInput):
+    stream.set_input(body.webrtc_id, body.transcript)
+
+
 @app.get("/transcript")
 def _(webrtc_id: str):
     async def output_stream():
         async for output in stream.output_stream(webrtc_id):
-            transcript = output.args[0]
+            transcript = output.args[0].split("\n")[-1]
             yield f"event: output\ndata: {transcript}\n\n"

     return StreamingResponse(output_stream(), media_type="text/event-stream")
@@ -73,7 +84,7 @@ if __name__ == "__main__":
     import os

     if (mode := os.getenv("MODE")) == "UI":
-        stream.ui.launch(server_port=7860, server_name="0.0.0.0")
+        stream.ui.launch(server_port=7860)
     elif mode == "PHONE":
         stream.fastphone(host="0.0.0.0", port=7860)
     else:
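
Because the transcript textbox is now an additional input, something has to push its current value into the stream when the demo runs outside the Gradio UI; that is what the new SendInput model and /send_input route do via stream.set_input, while /transcript keeps streaming each new caption line out as server-sent events. A hedged usage sketch of those two endpoints from a Python client (the base URL and webrtc_id are placeholders, not values from this commit):

import httpx

BASE = "http://localhost:7860"  # wherever the FastAPI app is served
webrtc_id = "abc123"            # placeholder; the page generates the real id

# Push a fresh value for the `transcript` additional input.
httpx.post(f"{BASE}/send_input", json={"webrtc_id": webrtc_id, "transcript": ""})

# Read caption lines as they arrive over server-sent events.
with httpx.stream(
    "GET", f"{BASE}/transcript", params={"webrtc_id": webrtc_id}, timeout=None
) as resp:
    for line in resp.iter_lines():
        if line.startswith("data:"):
            print(line.removeprefix("data:").strip())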


@@ -193,7 +193,8 @@
         </div>
         <div class="container">
-            <div class="transcript-container" id="transcript"></div>
+            <div class="transcript-container" id="transcript">
+            </div>
             <div class="controls">
                 <button id="start-button">Start Recording</button>
             </div>
@@ -220,13 +221,23 @@
             }, 5000);
         }

-        function handleMessage(event) {
+        async function handleMessage(event) {
             // Handle any WebRTC data channel messages if needed
             const eventJson = JSON.parse(event.data);
             if (eventJson.type === "error") {
                 showError(eventJson.message);
+            } else if (eventJson.type === "send_input") {
+                const response = await fetch('/send_input', {
+                    method: 'POST',
+                    headers: { 'Content-Type': 'application/json' },
+                    body: JSON.stringify({
+                        webrtc_id: webrtc_id,
+                        transcript: ""
+                    })
+                });
             }
             console.log('Received message:', event.data);
         }

         function updateButtonState() {
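
On the browser side, the new "send_input" message type on the WebRTC data channel is presumably how the server asks the page for fresh values of the additional inputs: handleMessage answers by POSTing its webrtc_id to the /send_input route added above, with an empty transcript since the server-side transcribe handler now accumulates the text itself.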