Mirror of https://github.com/HumanAIGC-Engineering/gradio-webrtc.git (synced 2026-02-04 17:39:23 +08:00)
Add example for "Talk to Azure OpenAI" (#181)

* Add example for "Talk to Azure OpenAI"
* Code

Co-authored-by: Freddy Boulton <alfonsoboulton@gmail.com>
demo/talk_to_azure_openai/README.md (new file, 15 lines)
@@ -0,0 +1,15 @@
---
title: Talk to Azure OpenAI
emoji: 🗣️
colorFrom: purple
colorTo: red
sdk: gradio
sdk_version: 5.16.0
app_file: app.py
pinned: false
license: mit
short_description: Talk to Azure OpenAI using their multimodal API
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|OPENAI_API_KEY]
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
demo/talk_to_azure_openai/README_gradio.md (new file, 15 lines)
@@ -0,0 +1,15 @@
---
title: Talk to Azure OpenAI (Gradio UI)
emoji: 🗣️
colorFrom: purple
colorTo: red
sdk: gradio
sdk_version: 5.16.0
app_file: app.py
pinned: false
license: mit
short_description: Talk to Azure OpenAI (Gradio UI)
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|OPENAI_API_KEY]
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
demo/talk_to_azure_openai/app.py (new file, 233 lines)
@@ -0,0 +1,233 @@
import asyncio
import base64
import json
from pathlib import Path

import sounddevice as sd

import gradio as gr
import numpy as np
import aiohttp  # pip install aiohttp
from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.responses import HTMLResponse, StreamingResponse
from fastrtc import (
    AdditionalOutputs,
    AsyncStreamHandler,
    Stream,
    get_twilio_turn_credentials,
    wait_for_item,
)
from gradio.utils import get_space

load_dotenv()
cur_dir = Path(__file__).parent
load_dotenv("key.env")
# sd.default.device = (3, 3)  # (input device, output device)

# print(f"Used Mic: {sd.query_devices(3)['name']}")
# print(f"Used Speaker: {sd.query_devices(3)['name']}")
SAMPLE_RATE = 24000

instruction = """
<Role>
You are a helpful assistant.
"""


class AzureAudioHandler(AsyncStreamHandler):
    def __init__(self) -> None:
        super().__init__(
            expected_layout="mono",
            output_sample_rate=SAMPLE_RATE,
            output_frame_size=480,
            input_sample_rate=SAMPLE_RATE,
        )
        self.ws = None
        self.session = None
        self.output_queue = asyncio.Queue()
        # This internal buffer is not used directly in receive_messages.
        # Instead, multiple audio chunks are collected in the emit() method.
        # If needed, a continuous buffer can also be implemented here.
        # self.audio_buffer = bytearray()

    def copy(self):
        return AzureAudioHandler()

    async def start_up(self):
        """Connects to the Azure Real-time Audio API via WebSocket using aiohttp."""
        # Replace the following placeholders with your actual Azure values:
        azure_api_key = "your-api-key"  # e.g., "your-api-key"
        azure_resource_name = "your-resource-name"  # e.g., "aigdopenai"
        deployment_id = "your-deployment-id"  # e.g., "gpt-4o-realtime-preview"
        api_version = "2024-10-01-preview"
        azure_endpoint = (
            f"wss://{azure_resource_name}.openai.azure.com/openai/realtime"
            f"?api-version={api_version}&deployment={deployment_id}"
        )
        headers = {"api-key": azure_api_key}

        self.session = aiohttp.ClientSession()
        self.ws = await self.session.ws_connect(azure_endpoint, headers=headers)
        # Send initial session parameters
        session_update_message = {
            "type": "session.update",
            "session": {
                "turn_detection": {"type": "server_vad"},
                "instructions": instruction,
                "voice": "ballad",  # For available voices, see https://platform.openai.com/docs/guides/realtime-model-capabilities#voice-options
            },
        }

        await self.ws.send_str(json.dumps(session_update_message))
        # Start receiving messages asynchronously
        asyncio.create_task(self.receive_messages())

    async def receive_messages(self):
        """Handles incoming WebSocket messages and processes them accordingly."""
        async for msg in self.ws:
            if msg.type == aiohttp.WSMsgType.TEXT:
                print("Received event:", msg.data)  # Debug output
                event = json.loads(msg.data)
                event_type = event.get("type")
                if event_type in ["final", "response.audio_transcript.done"]:
                    transcript = event.get("transcript", "")

                    # Wrap the transcript in an object with a .transcript attribute
                    class TranscriptEvent:
                        pass

                    te = TranscriptEvent()
                    te.transcript = transcript
                    await self.output_queue.put(AdditionalOutputs(te))
                elif event_type == "partial":
                    print("Partial transcript:", event.get("transcript", ""))
                elif event_type == "response.audio.delta":
                    audio_message = event.get("delta")
                    if audio_message:
                        try:
                            audio_bytes = base64.b64decode(audio_message)
                            # Assuming 16-bit PCM (int16)
                            audio_array = np.frombuffer(audio_bytes, dtype=np.int16)
                            # Interpret as mono audio:
                            audio_array = audio_array.reshape(1, -1)
                            # Instead of playing the audio, add the chunk to the output queue
                            await self.output_queue.put(
                                (self.output_sample_rate, audio_array)
                            )
                        except Exception as e:
                            print("Error processing audio data:", e)
                else:
                    print("Unknown event:", event)
            elif msg.type == aiohttp.WSMsgType.ERROR:
                break

    async def receive(self, frame: tuple[int, np.ndarray]) -> None:
        """Sends received audio frames to the WebSocket."""
        if not self.ws or self.ws.closed:
            return
        try:
            _, array = frame
            array = array.squeeze()
            audio_message = base64.b64encode(array.tobytes()).decode("utf-8")
            message = {"type": "input_audio_buffer.append", "audio": audio_message}
            await self.ws.send_str(json.dumps(message))
        except aiohttp.ClientConnectionError as e:
            print("Connection closed while sending:", e)
            return

    async def emit(self) -> tuple[int, np.ndarray] | AdditionalOutputs | None:
        """
        Collects multiple audio chunks from the queue before returning them as a
        single contiguous audio array. This helps smooth playback.
        """
        item = await wait_for_item(self.output_queue)
        # If it's a transcript event, return it immediately.
        if not isinstance(item, tuple):
            return item
        # Otherwise, it is an audio chunk (sample_rate, audio_array)
        sample_rate, first_chunk = item
        audio_chunks = [first_chunk]
        # Define a minimum length (e.g., 0.1 seconds)
        min_samples = int(SAMPLE_RATE * 0.1)  # 0.1 sec
        # Collect more audio chunks until we have enough samples
        while audio_chunks and audio_chunks[0].shape[1] < min_samples:
            try:
                extra = self.output_queue.get_nowait()
                if isinstance(extra, tuple):
                    _, chunk = extra
                    audio_chunks.append(chunk)
                else:
                    # If it's not an audio chunk, put it back
                    await self.output_queue.put(extra)
                    break
            except asyncio.QueueEmpty:
                break
        # Concatenate collected chunks along the time axis (axis=1)
        full_audio = np.concatenate(audio_chunks, axis=1)
        return (sample_rate, full_audio)

    async def shutdown(self) -> None:
        """Closes the WebSocket and session properly."""
        if self.ws:
            await self.ws.close()
            self.ws = None
        if self.session:
            await self.session.close()
            self.session = None


def update_chatbot(chatbot: list[dict], response) -> list[dict]:
    """Appends the AI assistant's transcript response to the chatbot messages."""
    chatbot.append({"role": "assistant", "content": response.transcript})
    return chatbot


chatbot = gr.Chatbot(type="messages")
latest_message = gr.Textbox(type="text", visible=False)
stream = Stream(
    AzureAudioHandler(),
    mode="send-receive",
    modality="audio",
    additional_inputs=[chatbot],
    additional_outputs=[chatbot],
    additional_outputs_handler=update_chatbot,
    rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
    concurrency_limit=5 if get_space() else None,
    time_limit=90 if get_space() else None,
)

app = FastAPI()
stream.mount(app)


@app.get("/")
async def _():
    rtc_config = get_twilio_turn_credentials() if get_space() else None
    html_content = (cur_dir / "index.html").read_text()
    html_content = html_content.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config))
    return HTMLResponse(content=html_content)


@app.get("/outputs")
def _(webrtc_id: str):
    async def output_stream():
        import json

        async for output in stream.output_stream(webrtc_id):
            s = json.dumps({"role": "assistant", "content": output.args[0].transcript})
            yield f"event: output\ndata: {s}\n\n"

    return StreamingResponse(output_stream(), media_type="text/event-stream")


if __name__ == "__main__":
    import os

    if (mode := os.getenv("MODE")) == "UI":
        stream.ui.launch(server_port=7860)
    elif mode == "PHONE":
        stream.fastphone(host="0.0.0.0", port=7860)
    else:
        import uvicorn

        uvicorn.run(app, host="0.0.0.0", port=7860)
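Note that app.py hardcodes placeholder Azure credentials in `start_up()` even though it already calls `load_dotenv("key.env")`. Below is a minimal sketch of reading those values from the env file instead; the variable names `AZURE_OPENAI_API_KEY`, `AZURE_OPENAI_RESOURCE`, and `AZURE_OPENAI_DEPLOYMENT` are assumptions for illustration and are not part of the demo.

```python
# Hypothetical key.env contents (names are assumptions, not from the demo):
# AZURE_OPENAI_API_KEY=...
# AZURE_OPENAI_RESOURCE=aigdopenai
# AZURE_OPENAI_DEPLOYMENT=gpt-4o-realtime-preview

import os

from dotenv import load_dotenv

load_dotenv("key.env")

azure_api_key = os.getenv("AZURE_OPENAI_API_KEY", "your-api-key")
azure_resource_name = os.getenv("AZURE_OPENAI_RESOURCE", "your-resource-name")
deployment_id = os.getenv("AZURE_OPENAI_DEPLOYMENT", "gpt-4o-realtime-preview")
api_version = "2024-10-01-preview"

# Same realtime endpoint that app.py builds from these values:
azure_endpoint = (
    f"wss://{azure_resource_name}.openai.azure.com/openai/realtime"
    f"?api-version={api_version}&deployment={deployment_id}"
)
```

For local runs, the `__main__` block selects the frontend via the `MODE` environment variable: `MODE=UI` launches the built-in Gradio UI, `MODE=PHONE` starts the fastphone endpoint, and anything else serves the FastAPI app with uvicorn on port 7860.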
demo/talk_to_azure_openai/index.html (new file, 356 lines)
@@ -0,0 +1,356 @@
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Azure OpenAI Real-Time Chat</title>
    <style>
        body {
            font-family: "SF Pro Display", -apple-system, BlinkMacSystemFont, sans-serif;
            background-color: #0a0a0a;
            color: #ffffff;
            margin: 0;
            padding: 20px;
            height: 100vh;
            box-sizing: border-box;
        }

        .container {
            max-width: 800px;
            margin: 0 auto;
            height: calc(100% - 100px);
        }

        .logo {
            text-align: center;
            margin-bottom: 40px;
        }

        .chat-container {
            border: 1px solid #333;
            padding: 20px;
            height: 90%;
            box-sizing: border-box;
            display: flex;
            flex-direction: column;
        }

        .chat-messages {
            flex-grow: 1;
            overflow-y: auto;
            margin-bottom: 20px;
            padding: 10px;
        }

        .message {
            margin-bottom: 20px;
            padding: 12px;
            border-radius: 4px;
            font-size: 16px;
            line-height: 1.5;
        }

        .message.user {
            background-color: #1a1a1a;
            margin-left: 20%;
        }

        .message.assistant {
            background-color: #262626;
            margin-right: 20%;
        }

        .controls {
            text-align: center;
            margin-top: 20px;
        }

        button {
            background-color: transparent;
            color: #ffffff;
            border: 1px solid #ffffff;
            padding: 12px 24px;
            font-family: inherit;
            font-size: 16px;
            cursor: pointer;
            transition: all 0.3s;
            text-transform: uppercase;
            letter-spacing: 1px;
        }

        button:hover {
            border-width: 2px;
            transform: scale(1.02);
            box-shadow: 0 0 10px rgba(255, 255, 255, 0.2);
        }

        #audio-output {
            display: none;
        }

        .icon-with-spinner {
            display: flex;
            align-items: center;
            justify-content: center;
            gap: 12px;
            min-width: 180px;
        }

        .spinner {
            width: 20px;
            height: 20px;
            border: 2px solid #ffffff;
            border-top-color: transparent;
            border-radius: 50%;
            animation: spin 1s linear infinite;
            flex-shrink: 0;
        }

        @keyframes spin {
            to {
                transform: rotate(360deg);
            }
        }

        .pulse-container {
            display: flex;
            align-items: center;
            justify-content: center;
            gap: 12px;
            min-width: 180px;
        }

        .pulse-circle {
            width: 20px;
            height: 20px;
            border-radius: 50%;
            background-color: #ffffff;
            opacity: 0.2;
            flex-shrink: 0;
            transform: translateX(-0%) scale(var(--audio-level, 1));
            transition: transform 0.1s ease;
        }

        /* Add styles for toast notifications */
        .toast {
            position: fixed;
            top: 20px;
            left: 50%;
            transform: translateX(-50%);
            padding: 16px 24px;
            border-radius: 4px;
            font-size: 14px;
            z-index: 1000;
            display: none;
            box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
        }

        .toast.error {
            background-color: #f44336;
            color: white;
        }

        .toast.warning {
            background-color: #ffd700;
            color: black;
        }
    </style>
</head>

<body>
    <!-- Add toast element after body opening tag -->
    <div id="error-toast" class="toast"></div>
    <div class="container">
        <div class="logo">
            <h1>OpenAI Real-Time Chat</h1>
        </div>
        <div class="chat-container">
            <div class="chat-messages" id="chat-messages"></div>
        </div>
        <div class="controls">
            <button id="start-button">Start Conversation</button>
        </div>
    </div>
    <audio id="audio-output"></audio>

    <script>
        let peerConnection;
        let webrtc_id;
        const audioOutput = document.getElementById('audio-output');
        const startButton = document.getElementById('start-button');
        const chatMessages = document.getElementById('chat-messages');
        let audioLevel = 0;
        let animationFrame;
        let audioContext, analyser, audioSource;

        function updateButtonState() {
            const button = document.getElementById('start-button');
            if (peerConnection && (peerConnection.connectionState === 'connecting' || peerConnection.connectionState === 'new')) {
                button.innerHTML = `
                    <div class="icon-with-spinner">
                        <div class="spinner"></div>
                        <span>Connecting...</span>
                    </div>
                `;
            } else if (peerConnection && peerConnection.connectionState === 'connected') {
                button.innerHTML = `
                    <div class="pulse-container">
                        <div class="pulse-circle"></div>
                        <span>Stop Conversation</span>
                    </div>
                `;
            } else {
                button.innerHTML = 'Start Conversation';
            }
        }

        function setupAudioVisualization(stream) {
            audioContext = new (window.AudioContext || window.webkitAudioContext)();
            analyser = audioContext.createAnalyser();
            audioSource = audioContext.createMediaStreamSource(stream);
            audioSource.connect(analyser);
            analyser.fftSize = 64;
            const dataArray = new Uint8Array(analyser.frequencyBinCount);

            function updateAudioLevel() {
                analyser.getByteFrequencyData(dataArray);
                const average = Array.from(dataArray).reduce((a, b) => a + b, 0) / dataArray.length;
                audioLevel = average / 255;
                // Update CSS variable instead of rebuilding the button
                const pulseCircle = document.querySelector('.pulse-circle');
                if (pulseCircle) {
                    pulseCircle.style.setProperty('--audio-level', 1 + audioLevel);
                }
                animationFrame = requestAnimationFrame(updateAudioLevel);
            }
            updateAudioLevel();
        }

        function showError(message) {
            const toast = document.getElementById('error-toast');
            toast.textContent = message;
            toast.style.display = 'block';
            // Hide toast after 5 seconds
            setTimeout(() => {
                toast.style.display = 'none';
            }, 5000);
        }

        async function setupWebRTC() {
            isConnecting = true;
            const config = __RTC_CONFIGURATION__;
            peerConnection = new RTCPeerConnection(config);
            const timeoutId = setTimeout(() => {
                const toast = document.getElementById('error-toast');
                toast.textContent = "Connection is taking longer than usual. Are you on a VPN?";
                toast.className = 'toast warning';
                toast.style.display = 'block';
                // Hide warning after 5 seconds
                setTimeout(() => {
                    toast.style.display = 'none';
                }, 5000);
            }, 5000);

            try {
                const stream = await navigator.mediaDevices.getUserMedia({
                    audio: true
                });
                setupAudioVisualization(stream);
                stream.getTracks().forEach(track => {
                    peerConnection.addTrack(track, stream);
                });
                peerConnection.addEventListener('track', (evt) => {
                    if (audioOutput.srcObject !== evt.streams[0]) {
                        audioOutput.srcObject = evt.streams[0];
                        audioOutput.play();
                    }
                });
                const dataChannel = peerConnection.createDataChannel('text');
                dataChannel.onmessage = (event) => {
                    const eventJson = JSON.parse(event.data);
                    if (eventJson.type === "error") {
                        showError(eventJson.message);
                    }
                };
                const offer = await peerConnection.createOffer();
                await peerConnection.setLocalDescription(offer);
                await new Promise((resolve) => {
                    if (peerConnection.iceGatheringState === "complete") {
                        resolve();
                    } else {
                        const checkState = () => {
                            if (peerConnection.iceGatheringState === "complete") {
                                peerConnection.removeEventListener("icegatheringstatechange", checkState);
                                resolve();
                            }
                        };
                        peerConnection.addEventListener("icegatheringstatechange", checkState);
                    }
                });
                peerConnection.addEventListener('connectionstatechange', () => {
                    console.log('connectionstatechange', peerConnection.connectionState);
                    if (peerConnection.connectionState === 'connected') {
                        clearTimeout(timeoutId);
                        const toast = document.getElementById('error-toast');
                        toast.style.display = 'none';
                    }
                    updateButtonState();
                });
                webrtc_id = Math.random().toString(36).substring(7);
                const response = await fetch('/webrtc/offer', {
                    method: 'POST',
                    headers: { 'Content-Type': 'application/json' },
                    body: JSON.stringify({
                        sdp: peerConnection.localDescription.sdp,
                        type: peerConnection.localDescription.type,
                        webrtc_id: webrtc_id
                    })
                });
                const serverResponse = await response.json();
                if (serverResponse.status === 'failed') {
                    showError(serverResponse.meta.error === 'concurrency_limit_reached'
                        ? `Too many connections. Maximum limit is ${serverResponse.meta.limit}`
                        : serverResponse.meta.error);
                    stop();
                    return;
                }
                await peerConnection.setRemoteDescription(serverResponse);
                const eventSource = new EventSource('/outputs?webrtc_id=' + webrtc_id);
                eventSource.addEventListener("output", (event) => {
                    const eventJson = JSON.parse(event.data);
                    addMessage("assistant", eventJson.content);
                });
            } catch (err) {
                clearTimeout(timeoutId);
                console.error('Error setting up WebRTC:', err);
                showError('Failed to establish connection. Please try again.');
                stop();
            }
        }

        function addMessage(role, content) {
            const messageDiv = document.createElement('div');
            messageDiv.classList.add('message', role);
            messageDiv.textContent = content;
            chatMessages.appendChild(messageDiv);
            chatMessages.scrollTop = chatMessages.scrollHeight;
        }

        function stop() {
            if (animationFrame) {
                cancelAnimationFrame(animationFrame);
            }
            if (audioContext) {
                audioContext.close();
                audioContext = null;
                analyser = null;
                audioSource = null;
            }
            if (peerConnection) {
                if (peerConnection.getTransceivers) {
                    peerConnection.getTransceivers().forEach(transceiver => {
                        if (transceiver.stop) {
                            transceiver.stop();
                        }
                    });
                }
                if (peerConnection.getSenders) {
                    peerConnection.getSenders().forEach(sender => {
                        if (sender.track && sender.track.stop) sender.track.stop();
                    });
                }
                console.log('closing');
                peerConnection.close();
            }
            updateButtonState();
            audioLevel = 0;
        }

        startButton.addEventListener('click', () => {
            console.log('clicked');
            console.log(peerConnection, peerConnection?.connectionState);
            if (!peerConnection || peerConnection.connectionState !== 'connected') {
                setupWebRTC();
            } else {
                console.log('stopping');
                stop();
            }
        });
    </script>
</body>

</html>
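index.html reads assistant transcripts from the `/outputs` server-sent-events endpoint with an `EventSource`. As a rough sketch, the same feed could also be consumed from Python with aiohttp (already a dependency); the localhost URL and the `webrtc_id` value below are assumptions for illustration and must correspond to an active session.

```python
import asyncio
import json

import aiohttp


async def read_transcripts(webrtc_id: str) -> None:
    # Assumes the demo server is running locally on port 7860.
    url = f"http://localhost:7860/outputs?webrtc_id={webrtc_id}"
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            # The server emits "event: output" followed by a "data: {...}" JSON
            # payload; parse the SSE stream line by line and print the data lines.
            async for raw in resp.content:
                line = raw.decode("utf-8").strip()
                if line.startswith("data:"):
                    payload = json.loads(line[len("data:"):].strip())
                    print(payload["role"], payload["content"])


if __name__ == "__main__":
    # Replace with the webrtc_id generated by the page for a live connection.
    asyncio.run(read_transcripts("some-webrtc-id"))
```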
demo/talk_to_azure_openai/requirements.txt (new file, 123 lines)
@@ -0,0 +1,123 @@
aiofiles==23.2.1
aiohappyeyeballs==2.6.1
aiohttp==3.11.13
aiohttp-retry==2.9.1
aioice==0.9.0
aiortc==1.10.1
aiosignal==1.3.2
annotated-types==0.7.0
anyio==4.8.0
attrs==25.2.0
audioread==3.0.1
av==13.1.0
babel==2.17.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
colorama==0.4.6
coloredlogs==15.0.1
colorlog==6.9.0
cryptography==44.0.2
csvw==3.5.1
decorator==5.2.1
distro==1.9.0
dlinfo==2.0.0
dnspython==2.7.0
espeakng-loader==0.2.4
fastapi==0.115.11
fastrtc==0.0.14
ffmpy==0.5.0
filelock==3.17.0
flatbuffers==25.2.10
frozenlist==1.5.0
fsspec==2025.3.0
google-crc32c==1.6.0
gradio==5.20.1
gradio_client==1.7.2
groovy==0.1.2
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
huggingface-hub==0.29.3
humanfriendly==10.0
idna==3.10
ifaddr==0.2.0
isodate==0.7.2
Jinja2==3.1.6
jiter==0.9.0
joblib==1.4.2
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
kokoro-onnx==0.4.5
language-tags==1.2.0
lazy_loader==0.4
librosa==0.11.0
llvmlite==0.44.0
markdown-it-py==3.0.0
MarkupSafe==2.1.5
mdurl==0.1.2
mpmath==1.3.0
msgpack==1.1.0
multidict==6.1.0
numba==0.61.0
numpy==2.1.3
onnxruntime==1.21.0
openai==1.66.2
orjson==3.10.15
packaging==24.2
pandas==2.2.3
phonemizer-fork==3.3.1
pillow==11.1.0
platformdirs==4.3.6
pooch==1.8.2
propcache==0.3.0
protobuf==6.30.0
pycparser==2.22
pydantic==2.10.6
pydantic_core==2.27.2
pydub==0.25.1
pyee==12.1.1
Pygments==2.19.1
PyJWT==2.10.1
pylibsrtp==0.11.0
pyOpenSSL==25.0.0
pyparsing==3.2.1
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
python-multipart==0.0.20
pytz==2025.1
PyYAML==6.0.2
rdflib==7.1.3
referencing==0.36.2
regex==2024.11.6
requests==2.32.3
rfc3986==1.5.0
rich==13.9.4
rpds-py==0.23.1
ruff==0.9.10
safehttpx==0.1.6
scikit-learn==1.6.1
scipy==1.15.2
segments==2.3.0
semantic-version==2.10.0
shellingham==1.5.4
six==1.17.0
sniffio==1.3.1
sounddevice==0.5.1
soundfile==0.13.1
soxr==0.5.0.post1
starlette==0.46.1
sympy==1.13.3
threadpoolctl==3.5.0
tomlkit==0.13.2
tqdm==4.67.1
twilio==9.5.0
typer==0.15.2
typing_extensions==4.12.2
tzdata==2025.1
uritemplate==4.1.1
urllib3==2.3.0
uvicorn==0.34.0
websockets==15.0.1
yarl==1.18.3
@@ -35,6 +35,7 @@ A collection of applications built with FastRTC. Click on the tags below to find
<button class="tag-button" data-tag="kyutai"><code>Kyutai</code></button>
<button class="tag-button" data-tag="agentic"><code>Agentic</code></button>
<button class="tag-button" data-tag="local"><code>Local Models</code></button>
<button class="tag-button" data-tag="electron"><code>Electron</code></button>
</div>

<script>
@@ -331,7 +332,7 @@ document.querySelectorAll('.tag-button').forEach(button => {

[:octicons-code-16: Code](https://github.com/sofi444/realtime-transcription-fastrtc/blob/main/main.py)

- :speaking_head:{ .lg .middle } __Talk to Claude - Electron App__
{: data-tags="audio,electron"}

---
@@ -341,8 +342,16 @@ document.querySelectorAll('.tag-button').forEach(button => {
<video width=98% src="https://github.com/user-attachments/assets/df4628e4-ef0f-4a78-ab9b-1ed2374b1cae" controls style="text-align: center"></video>

[:octicons-arrow-right-24: Demo](https://github.com/swairshah/voice-agent)

[:octicons-code-16: Code](https://github.com/swairshah/voice-agent)

- :speaking_head:{ .lg .middle } __Azure Realtime API__
{: data-tags="audio,real-time-api"}

---

Use the Azure Realtime API to create a real-time voice chat with GPT-4o.

[:octicons-code-16: Code](https://github.com/freddyaboulton/fastrtc/tree/main/demo/talk_to_azure_openai)

</div>