mirror of https://github.com/HumanAIGC-Engineering/gradio-webrtc.git
synced 2026-02-05 18:09:23 +08:00

[feat] update some features

Sync the code with fastrtc, add text support through the data channel, fix the Safari connection problem, and support chat without a camera or mic.
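The data-channel text support and the no-camera/no-mic chat combine naturally on the browser side: a data channel alone is enough to negotiate a peer connection, so getUserMedia() is never called and the browser never prompts for devices. Below is a minimal sketch reusing the /webrtc/offer signalling shape from the demo's index.html; connectTextOnly and the chat payload format are illustrative assumptions, not the exact protocol this commit ships.

// Hypothetical sketch: text-only chat over a WebRTC data channel.
// Signalling mirrors index.html's non-trickle /webrtc/offer POST;
// the message payload shape is an assumption for illustration.
async function connectTextOnly() {
    const pc = new RTCPeerConnection();

    // A data channel alone is enough to negotiate the connection --
    // no getUserMedia(), so no mic/camera permission prompt.
    const channel = pc.createDataChannel('text');
    channel.onopen = () => channel.send(JSON.stringify({ type: 'chat', text: 'hello' }));  // assumed payload shape
    channel.onmessage = (event) => console.log('peer:', event.data);

    const offer = await pc.createOffer();
    await pc.setLocalDescription(offer);

    // Wait for ICE gathering so the offer carries candidates,
    // matching the non-trickle flow in index.html below.
    await new Promise((resolve) => {
        if (pc.iceGatheringState === 'complete') return resolve();
        pc.addEventListener('icegatheringstatechange', () => {
            if (pc.iceGatheringState === 'complete') resolve();
        });
    });

    const webrtc_id = Math.random().toString(36).substring(7);
    const response = await fetch('/webrtc/offer', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            sdp: pc.localDescription.sdp,
            type: pc.localDescription.type,
            webrtc_id: webrtc_id
        })
    });
    await pc.setRemoteDescription(await response.json());
    return pc;
}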
15 demo/whisper_realtime/README.md Normal file
@@ -0,0 +1,15 @@
---
title: Whisper Realtime Transcription
emoji: 👂
colorFrom: purple
colorTo: red
sdk: gradio
sdk_version: 5.16.0
app_file: app.py
pinned: false
license: mit
short_description: Transcribe audio in realtime with Whisper
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|GROQ_API_KEY]
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
22 demo/whisper_realtime/README_gradio.md Normal file
@@ -0,0 +1,22 @@
---
app_file: app.py
colorFrom: purple
colorTo: red
emoji: 👂
license: mit
pinned: false
sdk: gradio
sdk_version: 5.16.0
short_description: Transcribe audio in realtime - Gradio UI version
tags:
- webrtc
- websocket
- gradio
- secret|TWILIO_ACCOUNT_SID
- secret|TWILIO_AUTH_TOKEN
- secret|GROQ_API_KEY
title: Whisper Realtime Transcription (Gradio UI)
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
93 demo/whisper_realtime/app.py Normal file
@@ -0,0 +1,93 @@
import json
from pathlib import Path

import gradio as gr
import numpy as np
from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.responses import HTMLResponse, StreamingResponse
from fastrtc import (
    AdditionalOutputs,
    ReplyOnPause,
    Stream,
    audio_to_bytes,
    get_twilio_turn_credentials,
)
from gradio.utils import get_space
from groq import AsyncClient
from pydantic import BaseModel

cur_dir = Path(__file__).parent

load_dotenv()


# Reads GROQ_API_KEY from the environment (loaded above via load_dotenv).
groq_client = AsyncClient()


# Called by ReplyOnPause with each pause-delimited audio chunk; appends the
# new Whisper transcription to the running transcript.
async def transcribe(audio: tuple[int, np.ndarray], transcript: str):
    response = await groq_client.audio.transcriptions.create(
        file=("audio-file.mp3", audio_to_bytes(audio)),
        model="whisper-large-v3-turbo",
        response_format="verbose_json",
    )
    yield AdditionalOutputs(transcript + "\n" + response.text)


# Send-only audio stream: the browser sends mic audio and transcripts come
# back as additional outputs.
transcript = gr.Textbox(label="Transcript")
stream = Stream(
    ReplyOnPause(transcribe),
    modality="audio",
    mode="send",
    additional_inputs=[transcript],
    additional_outputs=[transcript],
    additional_outputs_handler=lambda a, b: b,  # always keep the newest transcript
    rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
    concurrency_limit=5 if get_space() else None,
    time_limit=90 if get_space() else None,
)

app = FastAPI()

# Mount fastrtc's signalling routes (e.g. /webrtc/offer) on the FastAPI app.
stream.mount(app)


class SendInput(BaseModel):
    webrtc_id: str
    transcript: str


@app.post("/send_input")
def send_input(body: SendInput):
    stream.set_input(body.webrtc_id, body.transcript)


# Push each new transcript line to the browser as server-sent events.
@app.get("/transcript")
def _(webrtc_id: str):
    async def output_stream():
        async for output in stream.output_stream(webrtc_id):
            transcript = output.args[0].split("\n")[-1]
            yield f"event: output\ndata: {transcript}\n\n"

    return StreamingResponse(output_stream(), media_type="text/event-stream")


# Serve the single-page UI, injecting the TURN configuration.
@app.get("/")
def index():
    rtc_config = get_twilio_turn_credentials() if get_space() else None
    html_content = (cur_dir / "index.html").read_text()
    html_content = html_content.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config))
    return HTMLResponse(content=html_content)


if __name__ == "__main__":
    import os

    if (mode := os.getenv("MODE")) == "UI":
        stream.ui.launch(server_port=7860)
    elif mode == "PHONE":
        stream.fastphone(host="0.0.0.0", port=7860)
    else:
        import uvicorn

        uvicorn.run(app, host="0.0.0.0", port=7860)
435 demo/whisper_realtime/index.html Normal file
@@ -0,0 +1,435 @@
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Real-time Whisper Transcription</title>
    <style>
        :root {
            --primary-gradient: linear-gradient(135deg, #f9a45c 0%, #e66465 100%);
            --background-cream: #faf8f5;
            --text-dark: #2d2d2d;
        }

        body {
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
            margin: 0;
            padding: 0;
            background-color: var(--background-cream);
            color: var(--text-dark);
            min-height: 100vh;
        }

        .hero {
            background: var(--primary-gradient);
            color: white;
            padding: 2.5rem 2rem;
            text-align: center;
        }

        .hero h1 {
            font-size: 2.5rem;
            margin: 0;
            font-weight: 600;
            letter-spacing: -0.5px;
        }

        .hero p {
            font-size: 1rem;
            margin-top: 0.5rem;
            opacity: 0.9;
        }

        .container {
            max-width: 1000px;
            margin: 1.5rem auto;
            padding: 0 2rem;
        }

        .transcript-container {
            border-radius: 8px;
            box-shadow: 0 2px 8px rgba(0, 0, 0, 0.06);
            padding: 1.5rem;
            height: 300px;
            overflow-y: auto;
            margin-bottom: 1.5rem;
            border: 1px solid rgba(0, 0, 0, 0.1);
        }

        .controls {
            text-align: center;
            margin: 1.5rem 0;
        }

        button {
            background: var(--primary-gradient);
            color: white;
            border: none;
            padding: 10px 20px;
            font-size: 0.95rem;
            border-radius: 6px;
            cursor: pointer;
            transition: all 0.2s ease;
            font-weight: 500;
            min-width: 180px;
        }

        button:hover {
            transform: translateY(-1px);
            box-shadow: 0 4px 12px rgba(230, 100, 101, 0.15);
        }

        button:active {
            transform: translateY(0);
        }

        /* Transcript text styling */
        .transcript-container p {
            margin: 0.4rem 0;
            padding: 0.6rem;
            background: var(--background-cream);
            border-radius: 4px;
            line-height: 1.4;
            font-size: 0.95rem;
        }

        /* Custom scrollbar, kept thin */
        .transcript-container::-webkit-scrollbar {
            width: 6px;
        }

        .transcript-container::-webkit-scrollbar-track {
            background: var(--background-cream);
            border-radius: 3px;
        }

        .transcript-container::-webkit-scrollbar-thumb {
            background: #e66465;
            border-radius: 3px;
            opacity: 0.8;
        }

        .transcript-container::-webkit-scrollbar-thumb:hover {
            background: #f9a45c;
        }

        /* Toast notifications */
        .toast {
            position: fixed;
            top: 20px;
            left: 50%;
            transform: translateX(-50%);
            padding: 16px 24px;
            border-radius: 4px;
            font-size: 14px;
            z-index: 1000;
            display: none;
            box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
        }

        .toast.error {
            background-color: #f44336;
            color: white;
        }

        .toast.warning {
            background-color: #ffd700;
            color: black;
        }

        /* Audio visualization */
        .icon-with-spinner {
            display: flex;
            align-items: center;
            justify-content: center;
            gap: 12px;
            min-width: 180px;
        }

        .spinner {
            width: 20px;
            height: 20px;
            border: 2px solid white;
            border-top-color: transparent;
            border-radius: 50%;
            animation: spin 1s linear infinite;
            flex-shrink: 0;
        }

        .pulse-container {
            display: flex;
            align-items: center;
            justify-content: center;
            gap: 12px;
            min-width: 180px;
        }

        .pulse-circle {
            width: 20px;
            height: 20px;
            border-radius: 50%;
            background-color: white;
            opacity: 0.2;
            flex-shrink: 0;
            transform: scale(var(--audio-level, 1));
            transition: transform 0.1s ease;
        }

        @keyframes spin {
            to {
                transform: rotate(360deg);
            }
        }
    </style>
</head>

<body>
    <!-- Toast element for error/warning notifications -->
    <div id="error-toast" class="toast"></div>
    <div class="hero">
        <h1>Real-time Transcription</h1>
        <p>Powered by Groq and FastRTC</p>
    </div>

    <div class="container">
        <div class="transcript-container" id="transcript">
        </div>
        <div class="controls">
            <button id="start-button">Start Recording</button>
        </div>
    </div>

    <script>
        let peerConnection;
        let webrtc_id;
        let audioContext, analyser, audioSource;
        let audioLevel = 0;
        let animationFrame;

        const startButton = document.getElementById('start-button');
        const transcriptDiv = document.getElementById('transcript');

        function showError(message) {
            const toast = document.getElementById('error-toast');
            toast.textContent = message;
            toast.style.display = 'block';

            // Hide toast after 5 seconds
            setTimeout(() => {
                toast.style.display = 'none';
            }, 5000);
        }

        async function handleMessage(event) {
            // Handle messages arriving on the WebRTC data channel
            const eventJson = JSON.parse(event.data);
            if (eventJson.type === "error") {
                showError(eventJson.message);
            } else if (eventJson.type === "send_input") {
                await fetch('/send_input', {
                    method: 'POST',
                    headers: { 'Content-Type': 'application/json' },
                    body: JSON.stringify({
                        webrtc_id: webrtc_id,
                        transcript: ""
                    })
                });
            }
            console.log('Received message:', event.data);
        }

        function updateButtonState() {
            if (peerConnection && (peerConnection.connectionState === 'connecting' || peerConnection.connectionState === 'new')) {
                startButton.innerHTML = `
                    <div class="icon-with-spinner">
                        <div class="spinner"></div>
                        <span>Connecting...</span>
                    </div>
                `;
            } else if (peerConnection && peerConnection.connectionState === 'connected') {
                startButton.innerHTML = `
                    <div class="pulse-container">
                        <div class="pulse-circle"></div>
                        <span>Stop Recording</span>
                    </div>
                `;
            } else {
                startButton.innerHTML = 'Start Recording';
            }
        }

        function setupAudioVisualization(stream) {
            audioContext = new (window.AudioContext || window.webkitAudioContext)();
            analyser = audioContext.createAnalyser();
            audioSource = audioContext.createMediaStreamSource(stream);
            audioSource.connect(analyser);
            analyser.fftSize = 64;
            const dataArray = new Uint8Array(analyser.frequencyBinCount);

            function updateAudioLevel() {
                analyser.getByteFrequencyData(dataArray);
                const average = Array.from(dataArray).reduce((a, b) => a + b, 0) / dataArray.length;
                audioLevel = average / 255;

                const pulseCircle = document.querySelector('.pulse-circle');
                if (pulseCircle) {
                    pulseCircle.style.setProperty('--audio-level', 1 + audioLevel);
                }

                animationFrame = requestAnimationFrame(updateAudioLevel);
            }
            updateAudioLevel();
        }

        async function setupWebRTC() {
            const config = __RTC_CONFIGURATION__;
            peerConnection = new RTCPeerConnection(config);

            const timeoutId = setTimeout(() => {
                const toast = document.getElementById('error-toast');
                toast.textContent = "Connection is taking longer than usual. Are you on a VPN?";
                toast.className = 'toast warning';
                toast.style.display = 'block';

                // Hide warning after 5 seconds
                setTimeout(() => {
                    toast.style.display = 'none';
                }, 5000);
            }, 5000);

            try {
                const stream = await navigator.mediaDevices.getUserMedia({
                    audio: true
                });

                setupAudioVisualization(stream);

                stream.getTracks().forEach(track => {
                    peerConnection.addTrack(track, stream);
                });

                // Update the UI whenever the connection state changes
                peerConnection.addEventListener('connectionstatechange', () => {
                    console.log('connectionstatechange', peerConnection.connectionState);
                    if (peerConnection.connectionState === 'connected') {
                        clearTimeout(timeoutId);
                        const toast = document.getElementById('error-toast');
                        toast.style.display = 'none';
                    }
                    updateButtonState();
                });

                // Create data channel for messages
                const dataChannel = peerConnection.createDataChannel('text');
                dataChannel.onmessage = handleMessage;

                // Create and send offer
                const offer = await peerConnection.createOffer();
                await peerConnection.setLocalDescription(offer);

                // Wait for ICE gathering to complete before sending the offer
                await new Promise((resolve) => {
                    if (peerConnection.iceGatheringState === "complete") {
                        resolve();
                    } else {
                        const checkState = () => {
                            if (peerConnection.iceGatheringState === "complete") {
                                peerConnection.removeEventListener("icegatheringstatechange", checkState);
                                resolve();
                            }
                        };
                        peerConnection.addEventListener("icegatheringstatechange", checkState);
                    }
                });

                webrtc_id = Math.random().toString(36).substring(7);

                const response = await fetch('/webrtc/offer', {
                    method: 'POST',
                    headers: { 'Content-Type': 'application/json' },
                    body: JSON.stringify({
                        sdp: peerConnection.localDescription.sdp,
                        type: peerConnection.localDescription.type,
                        webrtc_id: webrtc_id
                    })
                });

                const serverResponse = await response.json();

                if (serverResponse.status === 'failed') {
                    showError(serverResponse.meta.error === 'concurrency_limit_reached'
                        ? `Too many connections. Maximum limit is ${serverResponse.meta.limit}`
                        : serverResponse.meta.error);
                    stop();
                    startButton.textContent = 'Start Recording';
                    return;
                }

                await peerConnection.setRemoteDescription(serverResponse);

                // Create event stream to receive transcripts
                const eventSource = new EventSource('/transcript?webrtc_id=' + webrtc_id);
                eventSource.addEventListener("output", (event) => {
                    appendTranscript(event.data);
                });
            } catch (err) {
                clearTimeout(timeoutId);
                console.error('Error setting up WebRTC:', err);
                showError('Failed to establish connection. Please try again.');
                stop();
                startButton.textContent = 'Start Recording';
            }
        }

        function appendTranscript(text) {
            const p = document.createElement('p');
            p.textContent = text;
            transcriptDiv.appendChild(p);
            transcriptDiv.scrollTop = transcriptDiv.scrollHeight;
        }

        function stop() {
            if (animationFrame) {
                cancelAnimationFrame(animationFrame);
            }
            if (audioContext) {
                audioContext.close();
                audioContext = null;
                analyser = null;
                audioSource = null;
            }
            if (peerConnection) {
                if (peerConnection.getTransceivers) {
                    peerConnection.getTransceivers().forEach(transceiver => {
                        if (transceiver.stop) {
                            transceiver.stop();
                        }
                    });
                }

                if (peerConnection.getSenders) {
                    peerConnection.getSenders().forEach(sender => {
                        if (sender.track && sender.track.stop) sender.track.stop();
                    });
                }

                setTimeout(() => {
                    peerConnection.close();
                }, 500);
            }
            audioLevel = 0;
            updateButtonState();
        }

        startButton.addEventListener('click', () => {
            if (startButton.textContent === 'Start Recording') {
                setupWebRTC();
            } else {
                stop();
            }
        });
    </script>
</body>

</html>
4 demo/whisper_realtime/requirements.txt Normal file
@@ -0,0 +1,4 @@
fastrtc[vad]
groq
python-dotenv
twilio