Mirror of https://github.com/HumanAIGC-Engineering/gradio-webrtc.git — synced 2026-02-05 18:09:23 +08:00
Raise errors automatically (#69)

* Add auto errors
* Change code

Co-authored-by: Freddy Boulton <freddyboulton@hf-freddy.local>
This commit is contained in:
@@ -14,7 +14,6 @@ from fastrtc import (
|
||||
AdditionalOutputs,
|
||||
ReplyOnPause,
|
||||
Stream,
|
||||
WebRTCError,
|
||||
get_tts_model,
|
||||
get_twilio_turn_credentials,
|
||||
)
|
||||
@@ -38,41 +37,36 @@ def response(
|
||||
audio: tuple[int, np.ndarray],
|
||||
chatbot: list[dict] | None = None,
|
||||
):
|
||||
try:
|
||||
chatbot = chatbot or []
|
||||
messages = [{"role": d["role"], "content": d["content"]} for d in chatbot]
|
||||
prompt = groq_client.audio.transcriptions.create(
|
||||
file=("audio-file.mp3", audio_to_bytes(audio)),
|
||||
model="whisper-large-v3-turbo",
|
||||
response_format="verbose_json",
|
||||
).text
|
||||
chatbot = chatbot or []
|
||||
messages = [{"role": d["role"], "content": d["content"]} for d in chatbot]
|
||||
prompt = groq_client.audio.transcriptions.create(
|
||||
file=("audio-file.mp3", audio_to_bytes(audio)),
|
||||
model="whisper-large-v3-turbo",
|
||||
response_format="verbose_json",
|
||||
).text
|
||||
chatbot.append({"role": "user", "content": prompt})
|
||||
yield AdditionalOutputs(chatbot)
|
||||
messages.append({"role": "user", "content": prompt})
|
||||
response = claude_client.messages.create(
|
||||
model="claude-3-5-haiku-20241022",
|
||||
max_tokens=512,
|
||||
messages=messages, # type: ignore
|
||||
)
|
||||
response_text = " ".join(
|
||||
block.text # type: ignore
|
||||
for block in response.content
|
||||
if getattr(block, "type", None) == "text"
|
||||
)
|
||||
chatbot.append({"role": "assistant", "content": response_text})
|
||||
|
||||
print("prompt", prompt)
|
||||
chatbot.append({"role": "user", "content": prompt})
|
||||
start = time.time()
|
||||
|
||||
print("starting tts", start)
|
||||
for i, chunk in enumerate(tts_model.stream_tts_sync(response_text)):
|
||||
print("chunk", i, time.time() - start)
|
||||
yield chunk
|
||||
print("finished tts", time.time() - start)
|
||||
yield AdditionalOutputs(chatbot)
|
||||
messages.append({"role": "user", "content": prompt})
|
||||
response = claude_client.messages.create(
|
||||
model="claude-3-5-haiku-20241022",
|
||||
max_tokens=512,
|
||||
messages=messages, # type: ignore
|
||||
)
|
||||
response_text = " ".join(
|
||||
block.text # type: ignore
|
||||
for block in response.content
|
||||
if getattr(block, "type", None) == "text"
|
||||
)
|
||||
chatbot.append({"role": "assistant", "content": response_text})
|
||||
|
||||
start = time.time()
|
||||
|
||||
print("starting tts", start)
|
||||
for i, chunk in enumerate(tts_model.stream_tts_sync(response_text)):
|
||||
print("chunk", i, time.time() - start)
|
||||
yield chunk
|
||||
print("finished tts", time.time() - start)
|
||||
yield AdditionalOutputs(chatbot)
|
||||
except Exception as e:
|
||||
raise WebRTCError(str(e))
|
||||
|
||||
|
||||
chatbot = gr.Chatbot(type="messages")
|
||||
|
||||
Reference in New Issue
Block a user