mirror of
https://github.com/HumanAIGC-Engineering/gradio-webrtc.git
synced 2026-02-05 18:09:23 +08:00
Rebrand to FastRTC (#60)
* Add code * add code * add code * Rename messages * rename * add code * Add demo * docs + demos + bug fixes * add code * styles * user guide * Styles * Add code * misc docs updates * print nit * whisper + pr * url for images * whsiper update * Fix bugs * remove demo files * version number * Fix pypi readme * Fix * demos * Add llama code editor * Update llama code editor and object detection cookbook * Add more cookbook demos * add code * Fix links for PR deploys * add code * Fix the install * add tts * TTS docs * Typo * Pending bubbles for reply on pause * Stream redesign (#63) * better error handling * Websocket error handling * add code --------- Co-authored-by: Freddy Boulton <freddyboulton@hf-freddy.local> * remove docs from dist * Some docs typos * more typos * upload changes + docs * docs * better phone * update docs * add code * Make demos better * fix docs + websocket start_up * remove mention of FastAPI app * fastphone tweaks * add code * ReplyOnStopWord fixes * Fix cookbook * Fix pypi readme * add code * bump versions * sambanova cookbook * Fix tags * Llm voice chat * kyutai tag * Add error message to all index.html * STT module uses Moonshine * Not required from typing extensions * fix llm voice chat * Add vpn warning * demo fixes * demos * Add more ui args and gemini audio-video * update cookbook * version 9 --------- Co-authored-by: Freddy Boulton <freddyboulton@hf-freddy.local>
This commit is contained in:
@@ -1,44 +0,0 @@
|
||||
---
|
||||
license: mit
|
||||
tags:
|
||||
- object-detection
|
||||
- computer-vision
|
||||
- yolov10
|
||||
datasets:
|
||||
- detection-datasets/coco
|
||||
sdk: gradio
|
||||
sdk_version: 5.0.0b1
|
||||
---
|
||||
|
||||
### Model Description
|
||||
[YOLOv10: Real-Time End-to-End Object Detection](https://arxiv.org/abs/2405.14458v1)
|
||||
|
||||
- arXiv: https://arxiv.org/abs/2405.14458v1
|
||||
- github: https://github.com/THU-MIG/yolov10
|
||||
|
||||
### Installation
|
||||
```
|
||||
pip install supervision git+https://github.com/THU-MIG/yolov10.git
|
||||
```
|
||||
|
||||
### Yolov10 Inference
|
||||
```python
|
||||
from ultralytics import YOLOv10
|
||||
import supervision as sv
|
||||
import cv2
|
||||
|
||||
IMAGE_PATH = 'dog.jpeg'
|
||||
|
||||
model = YOLOv10.from_pretrained('jameslahm/yolov10{n/s/m/b/l/x}')
|
||||
model.predict(IMAGE_PATH, show=True)
|
||||
```
|
||||
|
||||
### BibTeX Entry and Citation Info
|
||||
```
|
||||
@article{wang2024yolov10,
|
||||
title={YOLOv10: Real-Time End-to-End Object Detection},
|
||||
author={Wang, Ao and Chen, Hui and Liu, Lihao and Chen, Kai and Lin, Zijia and Han, Jungong and Ding, Guiguang},
|
||||
journal={arXiv preprint arXiv:2405.14458},
|
||||
year={2024}
|
||||
}
|
||||
```
|
||||
@@ -1,105 +0,0 @@
|
||||
import logging
|
||||
import os
|
||||
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
from gradio_webrtc import AdditionalOutputs, WebRTC
|
||||
from pydub import AudioSegment
|
||||
from twilio.rest import Client
|
||||
|
||||
# Configure the root logger to WARNING to suppress debug messages from other libraries
|
||||
logging.basicConfig(level=logging.WARNING)
|
||||
|
||||
# Create a console handler
|
||||
console_handler = logging.FileHandler("gradio_webrtc.log")
|
||||
console_handler.setLevel(logging.DEBUG)
|
||||
|
||||
# Create a formatter
|
||||
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
|
||||
console_handler.setFormatter(formatter)
|
||||
|
||||
# Configure the logger for your specific library
|
||||
logger = logging.getLogger("gradio_webrtc")
|
||||
logger.setLevel(logging.DEBUG)
|
||||
logger.addHandler(console_handler)
|
||||
|
||||
|
||||
account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
|
||||
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
|
||||
|
||||
if account_sid and auth_token:
|
||||
client = Client(account_sid, auth_token)
|
||||
|
||||
token = client.tokens.create()
|
||||
|
||||
rtc_configuration = {
|
||||
"iceServers": token.ice_servers,
|
||||
"iceTransportPolicy": "relay",
|
||||
}
|
||||
else:
|
||||
rtc_configuration = None
|
||||
|
||||
|
||||
def generation(num_steps):
|
||||
for i in range(num_steps):
|
||||
segment = AudioSegment.from_file(
|
||||
"/Users/freddy/sources/gradio/demo/scratch/audio-streaming/librispeech.mp3"
|
||||
)
|
||||
yield (
|
||||
(
|
||||
segment.frame_rate,
|
||||
np.array(segment.get_array_of_samples()).reshape(1, -1),
|
||||
),
|
||||
AdditionalOutputs(
|
||||
f"Hello, from step {i}!",
|
||||
"/Users/freddy/sources/gradio/demo/scratch/audio-streaming/librispeech.mp3",
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
css = """.my-group {max-width: 600px !important; max-height: 600 !important;}
|
||||
.my-column {display: flex !important; justify-content: center !important; align-items: center !important};"""
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
gr.HTML(
|
||||
"""
|
||||
<h1 style='text-align: center'>
|
||||
Audio Streaming (Powered by WebRTC ⚡️)
|
||||
</h1>
|
||||
"""
|
||||
)
|
||||
with gr.Column(elem_classes=["my-column"]):
|
||||
with gr.Group(elem_classes=["my-group"]):
|
||||
audio = WebRTC(
|
||||
label="Stream",
|
||||
rtc_configuration=rtc_configuration,
|
||||
mode="receive",
|
||||
modality="audio",
|
||||
)
|
||||
num_steps = gr.Slider(
|
||||
label="Number of Steps",
|
||||
minimum=1,
|
||||
maximum=10,
|
||||
step=1,
|
||||
value=5,
|
||||
)
|
||||
button = gr.Button("Generate")
|
||||
textbox = gr.Textbox(placeholder="Output will appear here.")
|
||||
audio_file = gr.Audio()
|
||||
|
||||
audio.stream(
|
||||
fn=generation, inputs=[num_steps], outputs=[audio], trigger=button.click
|
||||
)
|
||||
audio.on_additional_outputs(
|
||||
fn=lambda t, a: (f"State changed to {t}.", a),
|
||||
outputs=[textbox, audio_file],
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch(
|
||||
allowed_paths=[
|
||||
"/Users/freddy/sources/gradio/demo/scratch/audio-streaming/librispeech.mp3"
|
||||
]
|
||||
)
|
||||
367
demo/app.py
367
demo/app.py
@@ -1,367 +0,0 @@
|
||||
import os
|
||||
|
||||
import gradio as gr
|
||||
|
||||
_docs = {
|
||||
"WebRTC": {
|
||||
"description": "Stream audio/video with WebRTC",
|
||||
"members": {
|
||||
"__init__": {
|
||||
"rtc_configuration": {
|
||||
"type": "dict[str, Any] | None",
|
||||
"default": "None",
|
||||
"description": "The configration dictionary to pass to the RTCPeerConnection constructor. If None, the default configuration is used.",
|
||||
},
|
||||
"height": {
|
||||
"type": "int | str | None",
|
||||
"default": "None",
|
||||
"description": "The height of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.",
|
||||
},
|
||||
"width": {
|
||||
"type": "int | str | None",
|
||||
"default": "None",
|
||||
"description": "The width of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.",
|
||||
},
|
||||
"label": {
|
||||
"type": "str | None",
|
||||
"default": "None",
|
||||
"description": "the label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.",
|
||||
},
|
||||
"show_label": {
|
||||
"type": "bool | None",
|
||||
"default": "None",
|
||||
"description": "if True, will display label.",
|
||||
},
|
||||
"container": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if True, will place the component in a container - providing some extra padding around the border.",
|
||||
},
|
||||
"scale": {
|
||||
"type": "int | None",
|
||||
"default": "None",
|
||||
"description": "relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.",
|
||||
},
|
||||
"min_width": {
|
||||
"type": "int",
|
||||
"default": "160",
|
||||
"description": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.",
|
||||
},
|
||||
"interactive": {
|
||||
"type": "bool | None",
|
||||
"default": "None",
|
||||
"description": "if True, will allow users to upload a video; if False, can only be used to display videos. If not provided, this is inferred based on whether the component is used as an input or output.",
|
||||
},
|
||||
"visible": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if False, component will be hidden.",
|
||||
},
|
||||
"elem_id": {
|
||||
"type": "str | None",
|
||||
"default": "None",
|
||||
"description": "an optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.",
|
||||
},
|
||||
"elem_classes": {
|
||||
"type": "list[str] | str | None",
|
||||
"default": "None",
|
||||
"description": "an optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.",
|
||||
},
|
||||
"render": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.",
|
||||
},
|
||||
"key": {
|
||||
"type": "int | str | None",
|
||||
"default": "None",
|
||||
"description": "if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.",
|
||||
},
|
||||
"mirror_webcam": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if True webcam will be mirrored. Default is True.",
|
||||
},
|
||||
},
|
||||
"events": {"tick": {"type": None, "default": None, "description": ""}},
|
||||
},
|
||||
"__meta__": {"additional_interfaces": {}, "user_fn_refs": {"WebRTC": []}},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
abs_path = os.path.join(os.path.dirname(__file__), "css.css")
|
||||
|
||||
with gr.Blocks(
|
||||
css_paths=abs_path,
|
||||
theme=gr.themes.Default(
|
||||
font_mono=[
|
||||
gr.themes.GoogleFont("Inconsolata"),
|
||||
"monospace",
|
||||
],
|
||||
),
|
||||
) as demo:
|
||||
gr.Markdown(
|
||||
"""
|
||||
<h1 style='text-align: center; margin-bottom: 1rem'> Gradio WebRTC ⚡️ </h1>
|
||||
|
||||
<div style="display: flex; flex-direction: row; justify-content: center">
|
||||
<img style="display: block; padding-right: 5px; height: 20px;" alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.6%20-%20orange">
|
||||
<a href="https://github.com/freddyaboulton/gradio-webrtc" target="_blank"><img alt="Static Badge" src="https://img.shields.io/badge/github-white?logo=github&logoColor=black"></a>
|
||||
</div>
|
||||
""",
|
||||
elem_classes=["md-custom"],
|
||||
header_links=True,
|
||||
)
|
||||
gr.Markdown(
|
||||
"""
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
pip install gradio_webrtc
|
||||
```
|
||||
|
||||
## Examples:
|
||||
1. [Object Detection from Webcam with YOLOv10](https://huggingface.co/spaces/freddyaboulton/webrtc-yolov10n) 📷
|
||||
2. [Streaming Object Detection from Video with RT-DETR](https://huggingface.co/spaces/freddyaboulton/rt-detr-object-detection-webrtc) 🎥
|
||||
3. [Text-to-Speech](https://huggingface.co/spaces/freddyaboulton/parler-tts-streaming-webrtc) 🗣️
|
||||
4. [Conversational AI](https://huggingface.co/spaces/freddyaboulton/omni-mini-webrtc) 🤖🗣️
|
||||
|
||||
## Usage
|
||||
|
||||
The WebRTC component supports the following three use cases:
|
||||
1. [Streaming video from the user webcam to the server and back](#h-streaming-video-from-the-user-webcam-to-the-server-and-back)
|
||||
2. [Streaming Video from the server to the client](#h-streaming-video-from-the-server-to-the-client)
|
||||
3. [Streaming Audio from the server to the client](#h-streaming-audio-from-the-server-to-the-client)
|
||||
4. [Streaming Audio from the client to the server and back (conversational AI)](#h-conversational-ai)
|
||||
|
||||
|
||||
## Streaming Video from the User Webcam to the Server and Back
|
||||
|
||||
```python
|
||||
import gradio as gr
|
||||
from gradio_webrtc import WebRTC
|
||||
|
||||
|
||||
def detection(image, conf_threshold=0.3):
|
||||
... your detection code here ...
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
image = WebRTC(label="Stream", mode="send-receive", modality="video")
|
||||
conf_threshold = gr.Slider(
|
||||
label="Confidence Threshold",
|
||||
minimum=0.0,
|
||||
maximum=1.0,
|
||||
step=0.05,
|
||||
value=0.30,
|
||||
)
|
||||
image.stream(
|
||||
fn=detection,
|
||||
inputs=[image, conf_threshold],
|
||||
outputs=[image], time_limit=10
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
|
||||
```
|
||||
* Set the `mode` parameter to `send-receive` and `modality` to "video".
|
||||
* The `stream` event's `fn` parameter is a function that receives the next frame from the webcam
|
||||
as a **numpy array** and returns the processed frame also as a **numpy array**.
|
||||
* Numpy arrays are in (height, width, 3) format where the color channels are in RGB format.
|
||||
* The `inputs` parameter should be a list where the first element is the WebRTC component. The only output allowed is the WebRTC component.
|
||||
* The `time_limit` parameter is the maximum time in seconds the video stream will run. If the time limit is reached, the video stream will stop.
|
||||
|
||||
## Streaming Video from the server to the client
|
||||
|
||||
```python
|
||||
import gradio as gr
|
||||
from gradio_webrtc import WebRTC
|
||||
import cv2
|
||||
|
||||
def generation():
|
||||
url = "https://download.tsi.telecom-paristech.fr/gpac/dataset/dash/uhd/mux_sources/hevcds_720p30_2M.mp4"
|
||||
cap = cv2.VideoCapture(url)
|
||||
iterating = True
|
||||
while iterating:
|
||||
iterating, frame = cap.read()
|
||||
yield frame
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
output_video = WebRTC(label="Video Stream", mode="receive", modality="video")
|
||||
button = gr.Button("Start", variant="primary")
|
||||
output_video.stream(
|
||||
fn=generation, inputs=None, outputs=[output_video],
|
||||
trigger=button.click
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
```
|
||||
|
||||
* Set the "mode" parameter to "receive" and "modality" to "video".
|
||||
* The `stream` event's `fn` parameter is a generator function that yields the next frame from the video as a **numpy array**.
|
||||
* The only output allowed is the WebRTC component.
|
||||
* The `trigger` parameter the gradio event that will trigger the webrtc connection. In this case, the button click event.
|
||||
|
||||
## Streaming Audio from the Server to the Client
|
||||
|
||||
```python
|
||||
import gradio as gr
|
||||
from pydub import AudioSegment
|
||||
|
||||
def generation(num_steps):
|
||||
for _ in range(num_steps):
|
||||
segment = AudioSegment.from_file("/Users/freddy/sources/gradio/demo/audio_debugger/cantina.wav")
|
||||
yield (segment.frame_rate, np.array(segment.get_array_of_samples()).reshape(1, -1))
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
audio = WebRTC(label="Stream", mode="receive", modality="audio")
|
||||
num_steps = gr.Slider(
|
||||
label="Number of Steps",
|
||||
minimum=1,
|
||||
maximum=10,
|
||||
step=1,
|
||||
value=5,
|
||||
)
|
||||
button = gr.Button("Generate")
|
||||
|
||||
audio.stream(
|
||||
fn=generation, inputs=[num_steps], outputs=[audio],
|
||||
trigger=button.click
|
||||
)
|
||||
```
|
||||
|
||||
* Set the "mode" parameter to "receive" and "modality" to "audio".
|
||||
* The `stream` event's `fn` parameter is a generator function that yields the next audio segment as a tuple of (frame_rate, audio_samples).
|
||||
* The numpy array should be of shape (1, num_samples).
|
||||
* The `outputs` parameter should be a list with the WebRTC component as the only element.
|
||||
|
||||
## Conversational AI
|
||||
|
||||
```python
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
from gradio_webrtc import WebRTC, StreamHandler
|
||||
from queue import Queue
|
||||
import time
|
||||
|
||||
|
||||
class EchoHandler(StreamHandler):
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
self.queue = Queue()
|
||||
|
||||
def receive(self, frame: tuple[int, np.ndarray] | np.ndarray) -> None:
|
||||
self.queue.put(frame)
|
||||
|
||||
def emit(self) -> None:
|
||||
return self.queue.get()
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
with gr.Column():
|
||||
with gr.Group():
|
||||
audio = WebRTC(
|
||||
label="Stream",
|
||||
rtc_configuration=None,
|
||||
mode="send-receive",
|
||||
modality="audio",
|
||||
)
|
||||
|
||||
audio.stream(fn=EchoHandler(), inputs=[audio], outputs=[audio], time_limit=15)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
```
|
||||
|
||||
* Instead of passing a function to the `stream` event's `fn` parameter, pass a `StreamHandler` implementation. The `StreamHandler` above simply echoes the audio back to the client.
|
||||
* The `StreamHandler` class has two methods: `receive` and `emit`. The `receive` method is called when a new frame is received from the client, and the `emit` method returns the next frame to send to the client.
|
||||
* An audio frame is represented as a tuple of (frame_rate, audio_samples) where `audio_samples` is a numpy array of shape (num_channels, num_samples).
|
||||
* You can also specify the audio layout ("mono" or "stereo") in the emit method by retuning it as the third element of the tuple. If not specified, the default is "mono".
|
||||
* The `time_limit` parameter is the maximum time in seconds the conversation will run. If the time limit is reached, the audio stream will stop.
|
||||
* The `emit` method SHOULD NOT block. If a frame is not ready to be sent, the method should return None.
|
||||
|
||||
## Deployment
|
||||
|
||||
When deploying in a cloud environment (like Hugging Face Spaces, EC2, etc), you need to set up a TURN server to relay the WebRTC traffic.
|
||||
The easiest way to do this is to use a service like Twilio.
|
||||
|
||||
```python
|
||||
from twilio.rest import Client
|
||||
import os
|
||||
|
||||
account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
|
||||
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
|
||||
|
||||
client = Client(account_sid, auth_token)
|
||||
|
||||
token = client.tokens.create()
|
||||
|
||||
rtc_configuration = {
|
||||
"iceServers": token.ice_servers,
|
||||
"iceTransportPolicy": "relay",
|
||||
}
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
...
|
||||
rtc = WebRTC(rtc_configuration=rtc_configuration, ...)
|
||||
...
|
||||
```
|
||||
""",
|
||||
elem_classes=["md-custom"],
|
||||
header_links=True,
|
||||
)
|
||||
|
||||
gr.Markdown(
|
||||
"""
|
||||
##
|
||||
""",
|
||||
elem_classes=["md-custom"],
|
||||
header_links=True,
|
||||
)
|
||||
|
||||
gr.ParamViewer(value=_docs["WebRTC"]["members"]["__init__"], linkify=[])
|
||||
|
||||
demo.load(
|
||||
None,
|
||||
js=r"""function() {
|
||||
const refs = {};
|
||||
const user_fn_refs = {
|
||||
WebRTC: [], };
|
||||
requestAnimationFrame(() => {
|
||||
|
||||
Object.entries(user_fn_refs).forEach(([key, refs]) => {
|
||||
if (refs.length > 0) {
|
||||
const el = document.querySelector(`.${key}-user-fn`);
|
||||
if (!el) return;
|
||||
refs.forEach(ref => {
|
||||
el.innerHTML = el.innerHTML.replace(
|
||||
new RegExp("\\b"+ref+"\\b", "g"),
|
||||
`<a href="#h-${ref.toLowerCase()}">${ref}</a>`
|
||||
);
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
Object.entries(refs).forEach(([key, refs]) => {
|
||||
if (refs.length > 0) {
|
||||
const el = document.querySelector(`.${key}`);
|
||||
if (!el) return;
|
||||
refs.forEach(ref => {
|
||||
el.innerHTML = el.innerHTML.replace(
|
||||
new RegExp("\\b"+ref+"\\b", "g"),
|
||||
`<a href="#h-${ref.toLowerCase()}">${ref}</a>`
|
||||
);
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
""",
|
||||
)
|
||||
|
||||
demo.launch()
|
||||
367
demo/app_.py
367
demo/app_.py
@@ -1,367 +0,0 @@
|
||||
import os
|
||||
|
||||
import gradio as gr
|
||||
|
||||
_docs = {
|
||||
"WebRTC": {
|
||||
"description": "Stream audio/video with WebRTC",
|
||||
"members": {
|
||||
"__init__": {
|
||||
"rtc_configuration": {
|
||||
"type": "dict[str, Any] | None",
|
||||
"default": "None",
|
||||
"description": "The configration dictionary to pass to the RTCPeerConnection constructor. If None, the default configuration is used.",
|
||||
},
|
||||
"height": {
|
||||
"type": "int | str | None",
|
||||
"default": "None",
|
||||
"description": "The height of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.",
|
||||
},
|
||||
"width": {
|
||||
"type": "int | str | None",
|
||||
"default": "None",
|
||||
"description": "The width of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.",
|
||||
},
|
||||
"label": {
|
||||
"type": "str | None",
|
||||
"default": "None",
|
||||
"description": "the label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.",
|
||||
},
|
||||
"show_label": {
|
||||
"type": "bool | None",
|
||||
"default": "None",
|
||||
"description": "if True, will display label.",
|
||||
},
|
||||
"container": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if True, will place the component in a container - providing some extra padding around the border.",
|
||||
},
|
||||
"scale": {
|
||||
"type": "int | None",
|
||||
"default": "None",
|
||||
"description": "relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.",
|
||||
},
|
||||
"min_width": {
|
||||
"type": "int",
|
||||
"default": "160",
|
||||
"description": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.",
|
||||
},
|
||||
"interactive": {
|
||||
"type": "bool | None",
|
||||
"default": "None",
|
||||
"description": "if True, will allow users to upload a video; if False, can only be used to display videos. If not provided, this is inferred based on whether the component is used as an input or output.",
|
||||
},
|
||||
"visible": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if False, component will be hidden.",
|
||||
},
|
||||
"elem_id": {
|
||||
"type": "str | None",
|
||||
"default": "None",
|
||||
"description": "an optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.",
|
||||
},
|
||||
"elem_classes": {
|
||||
"type": "list[str] | str | None",
|
||||
"default": "None",
|
||||
"description": "an optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.",
|
||||
},
|
||||
"render": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.",
|
||||
},
|
||||
"key": {
|
||||
"type": "int | str | None",
|
||||
"default": "None",
|
||||
"description": "if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.",
|
||||
},
|
||||
"mirror_webcam": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if True webcam will be mirrored. Default is True.",
|
||||
},
|
||||
},
|
||||
"events": {"tick": {"type": None, "default": None, "description": ""}},
|
||||
},
|
||||
"__meta__": {"additional_interfaces": {}, "user_fn_refs": {"WebRTC": []}},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
abs_path = os.path.join(os.path.dirname(__file__), "css.css")
|
||||
|
||||
with gr.Blocks(
|
||||
css_paths=abs_path,
|
||||
theme=gr.themes.Default(
|
||||
font_mono=[
|
||||
gr.themes.GoogleFont("Inconsolata"),
|
||||
"monospace",
|
||||
],
|
||||
),
|
||||
) as demo:
|
||||
gr.Markdown(
|
||||
"""
|
||||
<h1 style='text-align: center; margin-bottom: 1rem'> Gradio WebRTC ⚡️ </h1>
|
||||
|
||||
<div style="display: flex; flex-direction: row; justify-content: center">
|
||||
<img style="display: block; padding-right: 5px; height: 20px;" alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.6%20-%20orange">
|
||||
<a href="https://github.com/freddyaboulton/gradio-webrtc" target="_blank"><img alt="Static Badge" src="https://img.shields.io/badge/github-white?logo=github&logoColor=black"></a>
|
||||
</div>
|
||||
""",
|
||||
elem_classes=["md-custom"],
|
||||
header_links=True,
|
||||
)
|
||||
gr.Markdown(
|
||||
"""
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
pip install gradio_webrtc
|
||||
```
|
||||
|
||||
## Examples:
|
||||
1. [Object Detection from Webcam with YOLOv10](https://huggingface.co/spaces/freddyaboulton/webrtc-yolov10n) 📷
|
||||
2. [Streaming Object Detection from Video with RT-DETR](https://huggingface.co/spaces/freddyaboulton/rt-detr-object-detection-webrtc) 🎥
|
||||
3. [Text-to-Speech](https://huggingface.co/spaces/freddyaboulton/parler-tts-streaming-webrtc) 🗣️
|
||||
4. [Conversational AI](https://huggingface.co/spaces/freddyaboulton/omni-mini-webrtc) 🤖🗣️
|
||||
|
||||
## Usage
|
||||
|
||||
The WebRTC component supports the following three use cases:
|
||||
1. [Streaming video from the user webcam to the server and back](#h-streaming-video-from-the-user-webcam-to-the-server-and-back)
|
||||
2. [Streaming Video from the server to the client](#h-streaming-video-from-the-server-to-the-client)
|
||||
3. [Streaming Audio from the server to the client](#h-streaming-audio-from-the-server-to-the-client)
|
||||
4. [Streaming Audio from the client to the server and back (conversational AI)](#h-conversational-ai)
|
||||
|
||||
|
||||
## Streaming Video from the User Webcam to the Server and Back
|
||||
|
||||
```python
|
||||
import gradio as gr
|
||||
from gradio_webrtc import WebRTC
|
||||
|
||||
|
||||
def detection(image, conf_threshold=0.3):
|
||||
... your detection code here ...
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
image = WebRTC(label="Stream", mode="send-receive", modality="video")
|
||||
conf_threshold = gr.Slider(
|
||||
label="Confidence Threshold",
|
||||
minimum=0.0,
|
||||
maximum=1.0,
|
||||
step=0.05,
|
||||
value=0.30,
|
||||
)
|
||||
image.stream(
|
||||
fn=detection,
|
||||
inputs=[image, conf_threshold],
|
||||
outputs=[image], time_limit=10
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
|
||||
```
|
||||
* Set the `mode` parameter to `send-receive` and `modality` to "video".
|
||||
* The `stream` event's `fn` parameter is a function that receives the next frame from the webcam
|
||||
as a **numpy array** and returns the processed frame also as a **numpy array**.
|
||||
* Numpy arrays are in (height, width, 3) format where the color channels are in RGB format.
|
||||
* The `inputs` parameter should be a list where the first element is the WebRTC component. The only output allowed is the WebRTC component.
|
||||
* The `time_limit` parameter is the maximum time in seconds the video stream will run. If the time limit is reached, the video stream will stop.
|
||||
|
||||
## Streaming Video from the server to the client
|
||||
|
||||
```python
|
||||
import gradio as gr
|
||||
from gradio_webrtc import WebRTC
|
||||
import cv2
|
||||
|
||||
def generation():
|
||||
url = "https://download.tsi.telecom-paristech.fr/gpac/dataset/dash/uhd/mux_sources/hevcds_720p30_2M.mp4"
|
||||
cap = cv2.VideoCapture(url)
|
||||
iterating = True
|
||||
while iterating:
|
||||
iterating, frame = cap.read()
|
||||
yield frame
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
output_video = WebRTC(label="Video Stream", mode="receive", modality="video")
|
||||
button = gr.Button("Start", variant="primary")
|
||||
output_video.stream(
|
||||
fn=generation, inputs=None, outputs=[output_video],
|
||||
trigger=button.click
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
```
|
||||
|
||||
* Set the "mode" parameter to "receive" and "modality" to "video".
|
||||
* The `stream` event's `fn` parameter is a generator function that yields the next frame from the video as a **numpy array**.
|
||||
* The only output allowed is the WebRTC component.
|
||||
* The `trigger` parameter the gradio event that will trigger the webrtc connection. In this case, the button click event.
|
||||
|
||||
## Streaming Audio from the Server to the Client
|
||||
|
||||
```python
|
||||
import gradio as gr
|
||||
from pydub import AudioSegment
|
||||
|
||||
def generation(num_steps):
|
||||
for _ in range(num_steps):
|
||||
segment = AudioSegment.from_file("/Users/freddy/sources/gradio/demo/audio_debugger/cantina.wav")
|
||||
yield (segment.frame_rate, np.array(segment.get_array_of_samples()).reshape(1, -1))
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
audio = WebRTC(label="Stream", mode="receive", modality="audio")
|
||||
num_steps = gr.Slider(
|
||||
label="Number of Steps",
|
||||
minimum=1,
|
||||
maximum=10,
|
||||
step=1,
|
||||
value=5,
|
||||
)
|
||||
button = gr.Button("Generate")
|
||||
|
||||
audio.stream(
|
||||
fn=generation, inputs=[num_steps], outputs=[audio],
|
||||
trigger=button.click
|
||||
)
|
||||
```
|
||||
|
||||
* Set the "mode" parameter to "receive" and "modality" to "audio".
|
||||
* The `stream` event's `fn` parameter is a generator function that yields the next audio segment as a tuple of (frame_rate, audio_samples).
|
||||
* The numpy array should be of shape (1, num_samples).
|
||||
* The `outputs` parameter should be a list with the WebRTC component as the only element.
|
||||
|
||||
## Conversational AI
|
||||
|
||||
```python
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
from gradio_webrtc import WebRTC, StreamHandler
|
||||
from queue import Queue
|
||||
import time
|
||||
|
||||
|
||||
class EchoHandler(StreamHandler):
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
self.queue = Queue()
|
||||
|
||||
def receive(self, frame: tuple[int, np.ndarray] | np.ndarray) -> None:
|
||||
self.queue.put(frame)
|
||||
|
||||
def emit(self) -> None:
|
||||
return self.queue.get()
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
with gr.Column():
|
||||
with gr.Group():
|
||||
audio = WebRTC(
|
||||
label="Stream",
|
||||
rtc_configuration=None,
|
||||
mode="send-receive",
|
||||
modality="audio",
|
||||
)
|
||||
|
||||
audio.stream(fn=EchoHandler(), inputs=[audio], outputs=[audio], time_limit=15)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
```
|
||||
|
||||
* Instead of passing a function to the `stream` event's `fn` parameter, pass a `StreamHandler` implementation. The `StreamHandler` above simply echoes the audio back to the client.
|
||||
* The `StreamHandler` class has two methods: `receive` and `emit`. The `receive` method is called when a new frame is received from the client, and the `emit` method returns the next frame to send to the client.
|
||||
* An audio frame is represented as a tuple of (frame_rate, audio_samples) where `audio_samples` is a numpy array of shape (num_channels, num_samples).
|
||||
* You can also specify the audio layout ("mono" or "stereo") in the emit method by retuning it as the third element of the tuple. If not specified, the default is "mono".
|
||||
* The `time_limit` parameter is the maximum time in seconds the conversation will run. If the time limit is reached, the audio stream will stop.
|
||||
* The `emit` method SHOULD NOT block. If a frame is not ready to be sent, the method should return None.
|
||||
|
||||
## Deployment
|
||||
|
||||
When deploying in a cloud environment (like Hugging Face Spaces, EC2, etc), you need to set up a TURN server to relay the WebRTC traffic.
|
||||
The easiest way to do this is to use a service like Twilio.
|
||||
|
||||
```python
|
||||
from twilio.rest import Client
|
||||
import os
|
||||
|
||||
account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
|
||||
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
|
||||
|
||||
client = Client(account_sid, auth_token)
|
||||
|
||||
token = client.tokens.create()
|
||||
|
||||
rtc_configuration = {
|
||||
"iceServers": token.ice_servers,
|
||||
"iceTransportPolicy": "relay",
|
||||
}
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
...
|
||||
rtc = WebRTC(rtc_configuration=rtc_configuration, ...)
|
||||
...
|
||||
```
|
||||
""",
|
||||
elem_classes=["md-custom"],
|
||||
header_links=True,
|
||||
)
|
||||
|
||||
gr.Markdown(
|
||||
"""
|
||||
##
|
||||
""",
|
||||
elem_classes=["md-custom"],
|
||||
header_links=True,
|
||||
)
|
||||
|
||||
gr.ParamViewer(value=_docs["WebRTC"]["members"]["__init__"], linkify=[])
|
||||
|
||||
demo.load(
|
||||
None,
|
||||
js=r"""function() {
|
||||
const refs = {};
|
||||
const user_fn_refs = {
|
||||
WebRTC: [], };
|
||||
requestAnimationFrame(() => {
|
||||
|
||||
Object.entries(user_fn_refs).forEach(([key, refs]) => {
|
||||
if (refs.length > 0) {
|
||||
const el = document.querySelector(`.${key}-user-fn`);
|
||||
if (!el) return;
|
||||
refs.forEach(ref => {
|
||||
el.innerHTML = el.innerHTML.replace(
|
||||
new RegExp("\\b"+ref+"\\b", "g"),
|
||||
`<a href="#h-${ref.toLowerCase()}">${ref}</a>`
|
||||
);
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
Object.entries(refs).forEach(([key, refs]) => {
|
||||
if (refs.length > 0) {
|
||||
const el = document.querySelector(`.${key}`);
|
||||
if (!el) return;
|
||||
refs.forEach(ref => {
|
||||
el.innerHTML = el.innerHTML.replace(
|
||||
new RegExp("\\b"+ref+"\\b", "g"),
|
||||
`<a href="#h-${ref.toLowerCase()}">${ref}</a>`
|
||||
);
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
""",
|
||||
)
|
||||
|
||||
demo.launch()
|
||||
@@ -1,73 +0,0 @@
|
||||
import os
|
||||
|
||||
import cv2
|
||||
import gradio as gr
|
||||
from gradio_webrtc import WebRTC
|
||||
from huggingface_hub import hf_hub_download
|
||||
from inference import YOLOv10
|
||||
from twilio.rest import Client
|
||||
|
||||
model_file = hf_hub_download(
|
||||
repo_id="onnx-community/yolov10n", filename="onnx/model.onnx"
|
||||
)
|
||||
|
||||
model = YOLOv10(model_file)
|
||||
|
||||
account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
|
||||
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
|
||||
|
||||
if account_sid and auth_token:
|
||||
client = Client(account_sid, auth_token)
|
||||
|
||||
token = client.tokens.create()
|
||||
|
||||
rtc_configuration = {
|
||||
"iceServers": token.ice_servers,
|
||||
"iceTransportPolicy": "relay",
|
||||
}
|
||||
else:
|
||||
rtc_configuration = None
|
||||
|
||||
|
||||
def detection(image, conf_threshold=0.3):
|
||||
image = cv2.resize(image, (model.input_width, model.input_height))
|
||||
new_image = model.detect_objects(image, conf_threshold)
|
||||
return cv2.resize(new_image, (500, 500))
|
||||
|
||||
|
||||
css = """.my-group {max-width: 600px !important; max-height: 600 !important;}
|
||||
.my-column {display: flex !important; justify-content: center !important; align-items: center !important};"""
|
||||
|
||||
|
||||
with gr.Blocks(css=css) as demo:
|
||||
gr.HTML(
|
||||
"""
|
||||
<h1 style='text-align: center'>
|
||||
YOLOv10 Webcam Stream (Powered by WebRTC ⚡️)
|
||||
</h1>
|
||||
"""
|
||||
)
|
||||
gr.HTML(
|
||||
"""
|
||||
<h3 style='text-align: center'>
|
||||
<a href='https://arxiv.org/abs/2405.14458' target='_blank'>arXiv</a> | <a href='https://github.com/THU-MIG/yolov10' target='_blank'>github</a>
|
||||
</h3>
|
||||
"""
|
||||
)
|
||||
with gr.Column(elem_classes=["my-column"]):
|
||||
with gr.Group(elem_classes=["my-group"]):
|
||||
image = WebRTC(label="Stream", rtc_configuration=rtc_configuration)
|
||||
conf_threshold = gr.Slider(
|
||||
label="Confidence Threshold",
|
||||
minimum=0.0,
|
||||
maximum=1.0,
|
||||
step=0.05,
|
||||
value=0.30,
|
||||
)
|
||||
|
||||
image.stream(
|
||||
fn=detection, inputs=[image, conf_threshold], outputs=[image], time_limit=10
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
@@ -1,71 +0,0 @@
|
||||
import os
|
||||
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
from gradio_webrtc import WebRTC
|
||||
from pydub import AudioSegment
|
||||
from twilio.rest import Client
|
||||
|
||||
account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
|
||||
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
|
||||
|
||||
if account_sid and auth_token:
|
||||
client = Client(account_sid, auth_token)
|
||||
|
||||
token = client.tokens.create()
|
||||
|
||||
rtc_configuration = {
|
||||
"iceServers": token.ice_servers,
|
||||
"iceTransportPolicy": "relay",
|
||||
}
|
||||
else:
|
||||
rtc_configuration = None
|
||||
|
||||
|
||||
def generation(num_steps):
|
||||
for _ in range(num_steps):
|
||||
segment = AudioSegment.from_file(
|
||||
"/Users/freddy/sources/gradio/demo/audio_debugger/cantina.wav"
|
||||
)
|
||||
yield (
|
||||
segment.frame_rate,
|
||||
np.array(segment.get_array_of_samples()).reshape(1, -1),
|
||||
)
|
||||
|
||||
|
||||
css = """.my-group {max-width: 600px !important; max-height: 600 !important;}
|
||||
.my-column {display: flex !important; justify-content: center !important; align-items: center !important};"""
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
gr.HTML(
|
||||
"""
|
||||
<h1 style='text-align: center'>
|
||||
Audio Streaming (Powered by WebRTC ⚡️)
|
||||
</h1>
|
||||
"""
|
||||
)
|
||||
with gr.Column(elem_classes=["my-column"]):
|
||||
with gr.Group(elem_classes=["my-group"]):
|
||||
audio = WebRTC(
|
||||
label="Stream",
|
||||
rtc_configuration=rtc_configuration,
|
||||
mode="receive",
|
||||
modality="audio",
|
||||
)
|
||||
num_steps = gr.Slider(
|
||||
label="Number of Steps",
|
||||
minimum=1,
|
||||
maximum=10,
|
||||
step=1,
|
||||
value=5,
|
||||
)
|
||||
button = gr.Button("Generate")
|
||||
|
||||
audio.stream(
|
||||
fn=generation, inputs=[num_steps], outputs=[audio], trigger=button.click
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
@@ -1,64 +0,0 @@
|
||||
import os
|
||||
import time
|
||||
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
from gradio_webrtc import WebRTC
|
||||
from pydub import AudioSegment
|
||||
from twilio.rest import Client
|
||||
|
||||
account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
|
||||
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
|
||||
|
||||
if account_sid and auth_token:
|
||||
client = Client(account_sid, auth_token)
|
||||
|
||||
token = client.tokens.create()
|
||||
|
||||
rtc_configuration = {
|
||||
"iceServers": token.ice_servers,
|
||||
"iceTransportPolicy": "relay",
|
||||
}
|
||||
else:
|
||||
rtc_configuration = None
|
||||
|
||||
|
||||
def generation(num_steps):
|
||||
for _ in range(num_steps):
|
||||
segment = AudioSegment.from_file(
|
||||
"/Users/freddy/sources/gradio/demo/audio_debugger/cantina.wav"
|
||||
)
|
||||
yield (
|
||||
segment.frame_rate,
|
||||
np.array(segment.get_array_of_samples()).reshape(1, -1),
|
||||
)
|
||||
time.sleep(3.5)
|
||||
|
||||
|
||||
css = """.my-group {max-width: 600px !important; max-height: 600 !important;}
|
||||
.my-column {display: flex !important; justify-content: center !important; align-items: center !important};"""
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
gr.HTML(
|
||||
"""
|
||||
<h1 style='text-align: center'>
|
||||
Audio Streaming (Powered by WebRaTC ⚡️)
|
||||
</h1>
|
||||
"""
|
||||
)
|
||||
with gr.Row():
|
||||
with gr.Column():
|
||||
gr.Slider()
|
||||
with gr.Column():
|
||||
# audio = gr.Audio(interactive=False)
|
||||
audio = WebRTC(
|
||||
label="Stream",
|
||||
rtc_configuration=rtc_configuration,
|
||||
mode="receive",
|
||||
modality="audio",
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
161
demo/css.css
161
demo/css.css
@@ -1,161 +0,0 @@
|
||||
html {
|
||||
font-family: Inter;
|
||||
font-size: 16px;
|
||||
font-weight: 400;
|
||||
line-height: 1.5;
|
||||
-webkit-text-size-adjust: 100%;
|
||||
background: #fff;
|
||||
color: #323232;
|
||||
-webkit-font-smoothing: antialiased;
|
||||
-moz-osx-font-smoothing: grayscale;
|
||||
text-rendering: optimizeLegibility;
|
||||
}
|
||||
|
||||
:root {
|
||||
--space: 1;
|
||||
--vspace: calc(var(--space) * 1rem);
|
||||
--vspace-0: calc(3 * var(--space) * 1rem);
|
||||
--vspace-1: calc(2 * var(--space) * 1rem);
|
||||
--vspace-2: calc(1.5 * var(--space) * 1rem);
|
||||
--vspace-3: calc(0.5 * var(--space) * 1rem);
|
||||
}
|
||||
|
||||
.app {
|
||||
max-width: 748px !important;
|
||||
}
|
||||
|
||||
.prose p {
|
||||
margin: var(--vspace) 0;
|
||||
line-height: var(--vspace * 2);
|
||||
font-size: 1rem;
|
||||
}
|
||||
|
||||
code {
|
||||
font-family: "Inconsolata", sans-serif;
|
||||
font-size: 16px;
|
||||
}
|
||||
|
||||
h1,
|
||||
h1 code {
|
||||
font-weight: 400;
|
||||
line-height: calc(2.5 / var(--space) * var(--vspace));
|
||||
}
|
||||
|
||||
h1 code {
|
||||
background: none;
|
||||
border: none;
|
||||
letter-spacing: 0.05em;
|
||||
padding-bottom: 5px;
|
||||
position: relative;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
h2 {
|
||||
margin: var(--vspace-1) 0 var(--vspace-2) 0;
|
||||
line-height: 1em;
|
||||
}
|
||||
|
||||
h3,
|
||||
h3 code {
|
||||
margin: var(--vspace-1) 0 var(--vspace-2) 0;
|
||||
line-height: 1em;
|
||||
}
|
||||
|
||||
h4,
|
||||
h5,
|
||||
h6 {
|
||||
margin: var(--vspace-3) 0 var(--vspace-3) 0;
|
||||
line-height: var(--vspace);
|
||||
}
|
||||
|
||||
.bigtitle,
|
||||
h1,
|
||||
h1 code {
|
||||
font-size: calc(8px * 4.5);
|
||||
word-break: break-word;
|
||||
}
|
||||
|
||||
.title,
|
||||
h2,
|
||||
h2 code {
|
||||
font-size: calc(8px * 3.375);
|
||||
font-weight: lighter;
|
||||
word-break: break-word;
|
||||
border: none;
|
||||
background: none;
|
||||
}
|
||||
|
||||
.subheading1,
|
||||
h3,
|
||||
h3 code {
|
||||
font-size: calc(8px * 1.8);
|
||||
font-weight: 600;
|
||||
border: none;
|
||||
background: none;
|
||||
letter-spacing: 0.1em;
|
||||
text-transform: uppercase;
|
||||
}
|
||||
|
||||
h2 code {
|
||||
padding: 0;
|
||||
position: relative;
|
||||
letter-spacing: 0.05em;
|
||||
}
|
||||
|
||||
blockquote {
|
||||
font-size: calc(8px * 1.1667);
|
||||
font-style: italic;
|
||||
line-height: calc(1.1667 * var(--vspace));
|
||||
margin: var(--vspace-2) var(--vspace-2);
|
||||
}
|
||||
|
||||
.subheading2,
|
||||
h4 {
|
||||
font-size: calc(8px * 1.4292);
|
||||
text-transform: uppercase;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.subheading3,
|
||||
h5 {
|
||||
font-size: calc(8px * 1.2917);
|
||||
line-height: calc(1.2917 * var(--vspace));
|
||||
|
||||
font-weight: lighter;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.15em;
|
||||
}
|
||||
|
||||
h6 {
|
||||
font-size: calc(8px * 1.1667);
|
||||
font-size: 1.1667em;
|
||||
font-weight: normal;
|
||||
font-style: italic;
|
||||
font-family: "le-monde-livre-classic-byol", serif !important;
|
||||
letter-spacing: 0px !important;
|
||||
}
|
||||
|
||||
#start .md > *:first-child {
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
h2 + h3 {
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
.md hr {
|
||||
border: none;
|
||||
border-top: 1px solid var(--block-border-color);
|
||||
margin: var(--vspace-2) 0 var(--vspace-2) 0;
|
||||
}
|
||||
.prose ul {
|
||||
margin: var(--vspace-2) 0 var(--vspace-1) 0;
|
||||
}
|
||||
|
||||
.gap {
|
||||
gap: 0;
|
||||
}
|
||||
|
||||
.md-custom {
|
||||
overflow: hidden;
|
||||
}
|
||||
99
demo/docs.py
99
demo/docs.py
@@ -1,99 +0,0 @@
|
||||
_docs = {
|
||||
"WebRTC": {
|
||||
"description": "Stream audio/video with WebRTC",
|
||||
"members": {
|
||||
"__init__": {
|
||||
"rtc_configuration": {
|
||||
"type": "dict[str, Any] | None",
|
||||
"default": "None",
|
||||
"description": "The configration dictionary to pass to the RTCPeerConnection constructor. If None, the default configuration is used.",
|
||||
},
|
||||
"height": {
|
||||
"type": "int | str | None",
|
||||
"default": "None",
|
||||
"description": "The height of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.",
|
||||
},
|
||||
"width": {
|
||||
"type": "int | str | None",
|
||||
"default": "None",
|
||||
"description": "The width of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.",
|
||||
},
|
||||
"label": {
|
||||
"type": "str | None",
|
||||
"default": "None",
|
||||
"description": "the label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.",
|
||||
},
|
||||
"show_label": {
|
||||
"type": "bool | None",
|
||||
"default": "None",
|
||||
"description": "if True, will display label.",
|
||||
},
|
||||
"container": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if True, will place the component in a container - providing some extra padding around the border.",
|
||||
},
|
||||
"scale": {
|
||||
"type": "int | None",
|
||||
"default": "None",
|
||||
"description": "relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.",
|
||||
},
|
||||
"min_width": {
|
||||
"type": "int",
|
||||
"default": "160",
|
||||
"description": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.",
|
||||
},
|
||||
"interactive": {
|
||||
"type": "bool | None",
|
||||
"default": "None",
|
||||
"description": "if True, will allow users to upload a video; if False, can only be used to display videos. If not provided, this is inferred based on whether the component is used as an input or output.",
|
||||
},
|
||||
"visible": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if False, component will be hidden.",
|
||||
},
|
||||
"elem_id": {
|
||||
"type": "str | None",
|
||||
"default": "None",
|
||||
"description": "an optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.",
|
||||
},
|
||||
"elem_classes": {
|
||||
"type": "list[str] | str | None",
|
||||
"default": "None",
|
||||
"description": "an optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.",
|
||||
},
|
||||
"render": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.",
|
||||
},
|
||||
"key": {
|
||||
"type": "int | str | None",
|
||||
"default": "None",
|
||||
"description": "if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.",
|
||||
},
|
||||
"mirror_webcam": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if True webcam will be mirrored. Default is True.",
|
||||
},
|
||||
"postprocess": {
|
||||
"value": {
|
||||
"type": "typing.Any",
|
||||
"description": "Expects a {str} or {pathlib.Path} filepath to a video which is displayed, or a {Tuple[str | pathlib.Path, str | pathlib.Path | None]} where the first element is a filepath to a video and the second element is an optional filepath to a subtitle file.",
|
||||
}
|
||||
},
|
||||
"preprocess": {
|
||||
"return": {
|
||||
"type": "str",
|
||||
"description": "Passes the uploaded video as a `str` filepath or URL whose extension can be modified by `format`.",
|
||||
},
|
||||
"value": None,
|
||||
},
|
||||
},
|
||||
"events": {"tick": {"type": None, "default": None, "description": ""}},
|
||||
},
|
||||
"__meta__": {"additional_interfaces": {}, "user_fn_refs": {"WebRTC": []}},
|
||||
}
|
||||
}
|
||||
15
demo/echo_audio/README.md
Normal file
15
demo/echo_audio/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Echo Audio
|
||||
emoji: 🪩
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Simple echo stream - simplest FastRTC demo
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
45
demo/echo_audio/app.py
Normal file
45
demo/echo_audio/app.py
Normal file
@@ -0,0 +1,45 @@
|
||||
import numpy as np
|
||||
from fastapi import FastAPI
|
||||
from fastapi.responses import RedirectResponse
|
||||
from fastrtc import ReplyOnPause, Stream, get_twilio_turn_credentials
|
||||
from gradio.utils import get_space
|
||||
|
||||
|
||||
def detection(audio: tuple[int, np.ndarray]):
|
||||
# Implement any iterator that yields audio
|
||||
# See "LLM Voice Chat" for a more complete example
|
||||
yield audio
|
||||
|
||||
|
||||
stream = Stream(
|
||||
handler=ReplyOnPause(detection),
|
||||
modality="audio",
|
||||
mode="send-receive",
|
||||
rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
|
||||
concurrency_limit=5 if get_space() else None,
|
||||
time_limit=90 if get_space() else None,
|
||||
)
|
||||
|
||||
app = FastAPI()
|
||||
|
||||
stream.mount(app)
|
||||
|
||||
|
||||
@app.get("/")
|
||||
async def index():
|
||||
return RedirectResponse(
|
||||
url="/ui" if not get_space() else "https://fastrtc-echo-audio.hf.space/ui/"
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import os
|
||||
|
||||
if (mode := os.getenv("MODE")) == "UI":
|
||||
stream.ui.launch(server_port=7860)
|
||||
elif mode == "PHONE":
|
||||
stream.fastphone(port=7860)
|
||||
else:
|
||||
import uvicorn
|
||||
|
||||
uvicorn.run(app, host="0.0.0.0", port=7860)
|
||||
3
demo/echo_audio/requirements.txt
Normal file
3
demo/echo_audio/requirements.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
fastrtc[vad]
|
||||
twilio
|
||||
python-dotenv
|
||||
@@ -1,61 +0,0 @@
|
||||
import logging
|
||||
from queue import Queue
|
||||
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
from gradio_webrtc import StreamHandler, WebRTC
|
||||
|
||||
# Configure the root logger to WARNING to suppress debug messages from other libraries
|
||||
logging.basicConfig(level=logging.WARNING)
|
||||
|
||||
# Create a console handler
|
||||
console_handler = logging.StreamHandler()
|
||||
console_handler.setLevel(logging.DEBUG)
|
||||
|
||||
# Create a formatter
|
||||
formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
|
||||
console_handler.setFormatter(formatter)
|
||||
|
||||
# Configure the logger for your specific library
|
||||
logger = logging.getLogger("gradio_webrtc")
|
||||
logger.setLevel(logging.DEBUG)
|
||||
logger.addHandler(console_handler)
|
||||
|
||||
|
||||
class EchoHandler(StreamHandler):
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
self.queue = Queue()
|
||||
|
||||
def receive(self, frame: tuple[int, np.ndarray] | np.ndarray) -> None:
|
||||
self.queue.put(frame)
|
||||
|
||||
def emit(self) -> None:
|
||||
return self.queue.get()
|
||||
|
||||
def copy(self) -> StreamHandler:
|
||||
return EchoHandler()
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
gr.HTML(
|
||||
"""
|
||||
<h1 style='text-align: center'>
|
||||
Conversational AI (Powered by WebRTC ⚡️)
|
||||
</h1>
|
||||
"""
|
||||
)
|
||||
with gr.Column():
|
||||
with gr.Group():
|
||||
audio = WebRTC(
|
||||
label="Stream",
|
||||
rtc_configuration=None,
|
||||
mode="send-receive",
|
||||
modality="audio",
|
||||
)
|
||||
|
||||
audio.stream(fn=EchoHandler(), inputs=[audio], outputs=[audio], time_limit=15)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
15
demo/gemini_audio_video/README.md
Normal file
15
demo/gemini_audio_video/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Gemini Audio Video
|
||||
emoji: ♊️
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Gemini understands audio and video!
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|GEMINI_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
207
demo/gemini_audio_video/app.py
Normal file
207
demo/gemini_audio_video/app.py
Normal file
@@ -0,0 +1,207 @@
|
||||
import asyncio
|
||||
import base64
|
||||
import os
|
||||
import time
|
||||
from io import BytesIO
|
||||
|
||||
import gradio as gr
|
||||
from gradio.utils import get_space
|
||||
import numpy as np
|
||||
from google import genai
|
||||
from dotenv import load_dotenv
|
||||
from fastrtc import (
|
||||
AsyncAudioVideoStreamHandler,
|
||||
Stream,
|
||||
get_twilio_turn_credentials,
|
||||
WebRTC,
|
||||
)
|
||||
from PIL import Image
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
def encode_audio(data: np.ndarray) -> dict:
|
||||
"""Encode Audio data to send to the server"""
|
||||
return {
|
||||
"mime_type": "audio/pcm",
|
||||
"data": base64.b64encode(data.tobytes()).decode("UTF-8"),
|
||||
}
|
||||
|
||||
|
||||
def encode_image(data: np.ndarray) -> dict:
|
||||
with BytesIO() as output_bytes:
|
||||
pil_image = Image.fromarray(data)
|
||||
pil_image.save(output_bytes, "JPEG")
|
||||
bytes_data = output_bytes.getvalue()
|
||||
base64_str = str(base64.b64encode(bytes_data), "utf-8")
|
||||
return {"mime_type": "image/jpeg", "data": base64_str}
|
||||
|
||||
|
||||
class GeminiHandler(AsyncAudioVideoStreamHandler):
|
||||
def __init__(
|
||||
self,
|
||||
) -> None:
|
||||
super().__init__(
|
||||
"mono",
|
||||
output_sample_rate=24000,
|
||||
output_frame_size=480,
|
||||
input_sample_rate=16000,
|
||||
)
|
||||
self.audio_queue = asyncio.Queue()
|
||||
self.video_queue = asyncio.Queue()
|
||||
self.quit = asyncio.Event()
|
||||
self.session = None
|
||||
self.last_frame_time = 0
|
||||
self.quit = asyncio.Event()
|
||||
|
||||
def copy(self) -> "GeminiHandler":
|
||||
return GeminiHandler()
|
||||
|
||||
async def start_up(self):
|
||||
client = genai.Client(
|
||||
api_key=os.getenv("GEMINI_API_KEY"), http_options={"api_version": "v1alpha"}
|
||||
)
|
||||
config = {"response_modalities": ["AUDIO"]}
|
||||
try:
|
||||
async with client.aio.live.connect(
|
||||
model="gemini-2.0-flash-exp", config=config
|
||||
) as session:
|
||||
self.session = session
|
||||
print("set session")
|
||||
while not self.quit.is_set():
|
||||
turn = self.session.receive()
|
||||
async for response in turn:
|
||||
if data := response.data:
|
||||
audio = np.frombuffer(data, dtype=np.int16).reshape(1, -1)
|
||||
self.audio_queue.put_nowait(audio)
|
||||
except Exception as e:
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
|
||||
async def video_receive(self, frame: np.ndarray):
|
||||
try:
|
||||
print("out")
|
||||
if self.session:
|
||||
print("here")
|
||||
# send image every 1 second
|
||||
print(time.time() - self.last_frame_time)
|
||||
if time.time() - self.last_frame_time > 1:
|
||||
self.last_frame_time = time.time()
|
||||
print("sending image")
|
||||
await self.session.send(input=encode_image(frame))
|
||||
print("sent image")
|
||||
if self.latest_args[1] is not None:
|
||||
print("sending image2")
|
||||
await self.session.send(input=encode_image(self.latest_args[1]))
|
||||
print("sent image2")
|
||||
except Exception as e:
|
||||
print(e)
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
self.video_queue.put_nowait(frame)
|
||||
|
||||
async def video_emit(self):
|
||||
return await self.video_queue.get()
|
||||
|
||||
async def receive(self, frame: tuple[int, np.ndarray]) -> None:
|
||||
_, array = frame
|
||||
array = array.squeeze()
|
||||
audio_message = encode_audio(array)
|
||||
if self.session:
|
||||
try:
|
||||
await self.session.send(input=audio_message)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
|
||||
async def emit(self):
|
||||
array = await self.audio_queue.get()
|
||||
return (self.output_sample_rate, array)
|
||||
|
||||
async def shutdown(self) -> None:
|
||||
if self.session:
|
||||
self.quit.set()
|
||||
await self.session._websocket.close()
|
||||
self.quit.clear()
|
||||
|
||||
|
||||
stream = Stream(
|
||||
handler=GeminiHandler(),
|
||||
modality="audio-video",
|
||||
mode="send-receive",
|
||||
rtc_configuration=get_twilio_turn_credentials()
|
||||
if get_space() == "spaces"
|
||||
else None,
|
||||
time_limit=90 if get_space() else None,
|
||||
additional_inputs=[
|
||||
gr.Image(label="Image", type="numpy", sources=["upload", "clipboard"])
|
||||
],
|
||||
ui_args={
|
||||
"icon": "https://www.gstatic.com/lamda/images/gemini_favicon_f069958c85030456e93de685481c559f160ea06b.png",
|
||||
"pulse_color": "rgb(35, 157, 225)",
|
||||
"icon_button_color": "rgb(35, 157, 225)",
|
||||
"title": "Gemini Audio Video Chat",
|
||||
},
|
||||
)
|
||||
|
||||
css = """
|
||||
#video-source {max-width: 600px !important; max-height: 600 !important;}
|
||||
"""
|
||||
|
||||
with gr.Blocks(css=css) as demo:
|
||||
gr.HTML(
|
||||
"""
|
||||
<div style='display: flex; align-items: center; justify-content: center; gap: 20px'>
|
||||
<div style="background-color: var(--block-background-fill); border-radius: 8px">
|
||||
<img src="https://www.gstatic.com/lamda/images/gemini_favicon_f069958c85030456e93de685481c559f160ea06b.png" style="width: 100px; height: 100px;">
|
||||
</div>
|
||||
<div>
|
||||
<h1>Gen AI SDK Voice Chat</h1>
|
||||
<p>Speak with Gemini using real-time audio + video streaming</p>
|
||||
<p>Powered by <a href="https://gradio.app/">Gradio</a> and <a href=https://freddyaboulton.github.io/gradio-webrtc/">WebRTC</a>⚡️</p>
|
||||
<p>Get an API Key <a href="https://support.google.com/googleapi/answer/6158862?hl=en">here</a></p>
|
||||
</div>
|
||||
</div>
|
||||
"""
|
||||
)
|
||||
with gr.Row() as row:
|
||||
with gr.Column():
|
||||
webrtc = WebRTC(
|
||||
label="Video Chat",
|
||||
modality="audio-video",
|
||||
mode="send-receive",
|
||||
elem_id="video-source",
|
||||
rtc_configuration=get_twilio_turn_credentials()
|
||||
if get_space() == "spaces"
|
||||
else None,
|
||||
icon="https://www.gstatic.com/lamda/images/gemini_favicon_f069958c85030456e93de685481c559f160ea06b.png",
|
||||
pulse_color="rgb(35, 157, 225)",
|
||||
icon_button_color="rgb(35, 157, 225)",
|
||||
)
|
||||
with gr.Column():
|
||||
image_input = gr.Image(
|
||||
label="Image", type="numpy", sources=["upload", "clipboard"]
|
||||
)
|
||||
|
||||
webrtc.stream(
|
||||
GeminiHandler(),
|
||||
inputs=[webrtc, image_input],
|
||||
outputs=[webrtc],
|
||||
time_limit=60 if get_space() else None,
|
||||
concurrency_limit=2 if get_space() else None,
|
||||
)
|
||||
|
||||
stream.ui = demo
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if (mode := os.getenv("MODE")) == "UI":
|
||||
stream.ui.launch(server_port=7860)
|
||||
elif mode == "PHONE":
|
||||
raise ValueError("Phone mode not supported for this demo")
|
||||
else:
|
||||
stream.ui.launch(server_port=7860)
|
||||
4
demo/gemini_audio_video/requirements.txt
Normal file
4
demo/gemini_audio_video/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
fastrtc
|
||||
python-dotenv
|
||||
google-genai
|
||||
twilio
|
||||
15
demo/hello_computer/README.md
Normal file
15
demo/hello_computer/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Hello Computer
|
||||
emoji: 💻
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Say computer before asking your question
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|SAMBANOVA_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
15
demo/hello_computer/README_gradio.md
Normal file
15
demo/hello_computer/README_gradio.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Hello Computer (Gradio)
|
||||
emoji: 💻
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Say computer (Gradio)
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|SAMBANOVA_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
153
demo/hello_computer/app.py
Normal file
153
demo/hello_computer/app.py
Normal file
@@ -0,0 +1,153 @@
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
import openai
|
||||
from dotenv import load_dotenv
|
||||
from fastapi import FastAPI
|
||||
from fastapi.responses import HTMLResponse, StreamingResponse
|
||||
from fastrtc import (
|
||||
AdditionalOutputs,
|
||||
ReplyOnStopWords,
|
||||
Stream,
|
||||
WebRTCError,
|
||||
get_stt_model,
|
||||
get_twilio_turn_credentials,
|
||||
)
|
||||
from gradio.utils import get_space
|
||||
from pydantic import BaseModel
|
||||
|
||||
load_dotenv()
|
||||
|
||||
curr_dir = Path(__file__).parent
|
||||
|
||||
|
||||
client = openai.OpenAI(
|
||||
api_key=os.environ.get("SAMBANOVA_API_KEY"),
|
||||
base_url="https://api.sambanova.ai/v1",
|
||||
)
|
||||
model = get_stt_model()
|
||||
|
||||
|
||||
def response(
|
||||
audio: tuple[int, np.ndarray],
|
||||
gradio_chatbot: list[dict] | None = None,
|
||||
conversation_state: list[dict] | None = None,
|
||||
):
|
||||
gradio_chatbot = gradio_chatbot or []
|
||||
conversation_state = conversation_state or []
|
||||
try:
|
||||
text = model.stt(audio)
|
||||
print("STT in handler", text)
|
||||
sample_rate, array = audio
|
||||
gradio_chatbot.append(
|
||||
{"role": "user", "content": gr.Audio((sample_rate, array.squeeze()))}
|
||||
)
|
||||
yield AdditionalOutputs(gradio_chatbot, conversation_state)
|
||||
|
||||
conversation_state.append({"role": "user", "content": text})
|
||||
|
||||
request = client.chat.completions.create(
|
||||
model="Meta-Llama-3.2-3B-Instruct",
|
||||
messages=conversation_state, # type: ignore
|
||||
temperature=0.1,
|
||||
top_p=0.1,
|
||||
)
|
||||
response = {"role": "assistant", "content": request.choices[0].message.content}
|
||||
|
||||
except Exception as e:
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
raise WebRTCError(str(e) + "\n" + traceback.format_exc())
|
||||
|
||||
conversation_state.append(response)
|
||||
gradio_chatbot.append(response)
|
||||
|
||||
yield AdditionalOutputs(gradio_chatbot, conversation_state)
|
||||
|
||||
|
||||
chatbot = gr.Chatbot(type="messages", value=[])
|
||||
state = gr.State(value=[])
|
||||
stream = Stream(
|
||||
ReplyOnStopWords(
|
||||
response, # type: ignore
|
||||
stop_words=["computer"],
|
||||
input_sample_rate=16000,
|
||||
),
|
||||
mode="send",
|
||||
modality="audio",
|
||||
additional_inputs=[chatbot, state],
|
||||
additional_outputs=[chatbot, state],
|
||||
additional_outputs_handler=lambda *a: (a[2], a[3]),
|
||||
concurrency_limit=5 if get_space() else None,
|
||||
time_limit=90 if get_space() else None,
|
||||
rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
|
||||
)
|
||||
app = FastAPI()
|
||||
stream.mount(app)
|
||||
|
||||
|
||||
class Message(BaseModel):
    """One chat message exchanged with the browser client."""

    # Presumably "user" or "assistant" — TODO confirm against the JS client.
    role: str
    content: str


class InputData(BaseModel):
    """Payload POSTed to ``/input_hook`` to seed the stream's inputs."""

    webrtc_id: str
    chatbot: list[Message]
    state: list[Message]
|
||||
|
||||
|
||||
@app.get("/")
async def _():
    """Serve the single-page UI with the WebRTC configuration injected."""
    rtc_config = get_twilio_turn_credentials() if get_space() else None
    page = (curr_dir / "index.html").read_text()
    # The placeholder becomes JSON the page can use directly (null when local).
    page = page.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config))
    return HTMLResponse(content=page)
|
||||
|
||||
|
||||
@app.post("/input_hook")
async def _(data: InputData):
    """Store the client's latest chat/state snapshot for this connection."""
    payload = data.model_dump()
    # set_input associates the inputs with the given WebRTC connection id.
    stream.set_input(data.webrtc_id, payload["chatbot"], payload["state"])
|
||||
|
||||
|
||||
def audio_to_base64(file_path):
    """Return the file's bytes as a ``data:audio/wav`` base64 data URI.

    NOTE(review): the MIME subtype is hard-coded to ``wav`` regardless of the
    file's actual extension — confirm all callers pass WAV paths.
    """
    with open(file_path, "rb") as fh:
        raw = fh.read()
    encoded = base64.b64encode(raw).decode("utf-8")
    return "data:audio/wav;base64," + encoded
|
||||
|
||||
|
||||
@app.get("/outputs")
async def _(webrtc_id: str):
    """Relay handler outputs to the browser as Server-Sent Events."""

    async def event_source():
        async for output in stream.output_stream(webrtc_id):
            chatbot, state = output.args[0], output.args[1]
            latest = chatbot[-1]
            if latest["role"] == "user":
                # User turns carry a gr.Audio whose value holds a file path.
                audio = audio_to_base64(latest["content"].value["path"])
            else:
                audio = None
            payload = {"message": state[-1], "audio": audio}
            yield f"event: output\ndata: {json.dumps(payload)}\n\n"

    return StreamingResponse(event_source(), media_type="text/event-stream")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import os

    mode = os.getenv("MODE")
    if mode == "UI":
        stream.ui.launch(server_port=7860)
    elif mode == "PHONE":
        # The stop-word demo has no telephone entry point.
        raise ValueError("Phone mode not supported")
    else:
        import uvicorn

        uvicorn.run(app, host="0.0.0.0", port=7860)
|
||||
486
demo/hello_computer/index.html
Normal file
486
demo/hello_computer/index.html
Normal file
@@ -0,0 +1,486 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Hello Computer 💻</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
|
||||
background-color: #f8f9fa;
|
||||
color: #1a1a1a;
|
||||
margin: 0;
|
||||
padding: 20px;
|
||||
height: 100vh;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
.container {
|
||||
max-width: 800px;
|
||||
margin: 0 auto;
|
||||
height: calc(100% - 100px);
|
||||
}
|
||||
|
||||
.logo {
|
||||
text-align: center;
|
||||
margin-bottom: 40px;
|
||||
}
|
||||
|
||||
.chat-container {
|
||||
background: white;
|
||||
border-radius: 8px;
|
||||
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
|
||||
padding: 20px;
|
||||
height: 90%;
|
||||
box-sizing: border-box;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.chat-messages {
|
||||
flex-grow: 1;
|
||||
overflow-y: auto;
|
||||
margin-bottom: 20px;
|
||||
padding: 10px;
|
||||
}
|
||||
|
||||
.message {
|
||||
margin-bottom: 20px;
|
||||
padding: 12px;
|
||||
border-radius: 8px;
|
||||
font-size: 14px;
|
||||
line-height: 1.5;
|
||||
}
|
||||
|
||||
.message.user {
|
||||
background-color: #e9ecef;
|
||||
margin-left: 20%;
|
||||
}
|
||||
|
||||
.message.assistant {
|
||||
background-color: #f1f3f5;
|
||||
margin-right: 20%;
|
||||
}
|
||||
|
||||
.controls {
|
||||
text-align: center;
|
||||
margin-top: 20px;
|
||||
}
|
||||
|
||||
button {
|
||||
background-color: #0066cc;
|
||||
color: white;
|
||||
border: none;
|
||||
padding: 12px 24px;
|
||||
font-family: inherit;
|
||||
font-size: 14px;
|
||||
cursor: pointer;
|
||||
transition: all 0.3s;
|
||||
border-radius: 4px;
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
button:hover {
|
||||
background-color: #0052a3;
|
||||
}
|
||||
|
||||
#audio-output {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.icon-with-spinner {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 12px;
|
||||
min-width: 180px;
|
||||
}
|
||||
|
||||
.spinner {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
border: 2px solid #ffffff;
|
||||
border-top-color: transparent;
|
||||
border-radius: 50%;
|
||||
animation: spin 1s linear infinite;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
to {
|
||||
transform: rotate(360deg);
|
||||
}
|
||||
}
|
||||
|
||||
.pulse-container {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 12px;
|
||||
min-width: 180px;
|
||||
}
|
||||
|
||||
.pulse-circle {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
border-radius: 50%;
|
||||
background-color: #ffffff;
|
||||
opacity: 0.2;
|
||||
flex-shrink: 0;
|
||||
transform: translateX(-0%) scale(var(--audio-level, 1));
|
||||
transition: transform 0.1s ease;
|
||||
}
|
||||
|
||||
/* Add styles for typing indicator */
|
||||
.typing-indicator {
|
||||
padding: 8px;
|
||||
background-color: #f1f3f5;
|
||||
border-radius: 8px;
|
||||
margin-bottom: 10px;
|
||||
display: none;
|
||||
}
|
||||
|
||||
.dots {
|
||||
display: inline-flex;
|
||||
gap: 4px;
|
||||
}
|
||||
|
||||
.dot {
|
||||
width: 8px;
|
||||
height: 8px;
|
||||
background-color: #0066cc;
|
||||
border-radius: 50%;
|
||||
animation: pulse 1.5s infinite;
|
||||
opacity: 0.5;
|
||||
}
|
||||
|
||||
.dot:nth-child(2) {
|
||||
animation-delay: 0.5s;
|
||||
}
|
||||
|
||||
.dot:nth-child(3) {
|
||||
animation-delay: 1s;
|
||||
}
|
||||
|
||||
@keyframes pulse {
|
||||
|
||||
0%,
|
||||
100% {
|
||||
opacity: 0.5;
|
||||
transform: scale(1);
|
||||
}
|
||||
|
||||
50% {
|
||||
opacity: 1;
|
||||
transform: scale(1.2);
|
||||
}
|
||||
}
|
||||
|
||||
/* Add styles for toast notifications */
|
||||
.toast {
|
||||
position: fixed;
|
||||
top: 20px;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
padding: 16px 24px;
|
||||
border-radius: 4px;
|
||||
font-size: 14px;
|
||||
z-index: 1000;
|
||||
display: none;
|
||||
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
|
||||
}
|
||||
|
||||
.toast.error {
|
||||
background-color: #f44336;
|
||||
color: white;
|
||||
}
|
||||
|
||||
.toast.warning {
|
||||
background-color: #ffd700;
|
||||
color: black;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<!-- Add toast element after body opening tag -->
|
||||
<div id="error-toast" class="toast"></div>
|
||||
<div class="container">
|
||||
<div class="logo">
|
||||
<h1>Hello Computer 💻</h1>
|
||||
<h2 style="font-size: 1.2em; color: #666; margin-top: 10px;">Say 'Computer' before asking your question</h2>
|
||||
</div>
|
||||
<div class="chat-container">
|
||||
<div class="chat-messages" id="chat-messages"></div>
|
||||
<div class="typing-indicator" id="typing-indicator">
|
||||
<div class="dots">
|
||||
<div class="dot"></div>
|
||||
<div class="dot"></div>
|
||||
<div class="dot"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="controls">
|
||||
<button id="start-button">Start Conversation</button>
|
||||
</div>
|
||||
</div>
|
||||
<audio id="audio-output"></audio>
|
||||
|
||||
<script>
|
||||
let peerConnection;
|
||||
let webrtc_id;
|
||||
const startButton = document.getElementById('start-button');
|
||||
const chatMessages = document.getElementById('chat-messages');
|
||||
|
||||
let audioLevel = 0;
|
||||
let animationFrame;
|
||||
let audioContext, analyser, audioSource;
|
||||
let messages = [];
|
||||
let eventSource;
|
||||
|
||||
function updateButtonState() {
    // Reflect the RTCPeerConnection state on the start/stop button.
    const button = document.getElementById('start-button');
    const connState = peerConnection ? peerConnection.connectionState : null;
    if (connState === 'connecting' || connState === 'new') {
        button.innerHTML = `
            <div class="icon-with-spinner">
                <div class="spinner"></div>
                <span>Connecting...</span>
            </div>
        `;
    } else if (connState === 'connected') {
        button.innerHTML = `
            <div class="pulse-container">
                <div class="pulse-circle"></div>
                <span>Stop Conversation</span>
            </div>
        `;
    } else {
        button.innerHTML = 'Start Conversation';
    }
}
|
||||
|
||||
function setupAudioVisualization(stream) {
    // Drive the pulse indicator from the microphone's frequency spectrum.
    audioContext = new (window.AudioContext || window.webkitAudioContext)();
    analyser = audioContext.createAnalyser();
    audioSource = audioContext.createMediaStreamSource(stream);
    audioSource.connect(analyser);
    analyser.fftSize = 64;
    const levels = new Uint8Array(analyser.frequencyBinCount);

    const updateAudioLevel = () => {
        analyser.getByteFrequencyData(levels);
        let total = 0;
        for (const v of levels) total += v;
        // Normalize the mean byte value into [0, 1].
        audioLevel = (total / levels.length) / 255;

        const pulseCircle = document.querySelector('.pulse-circle');
        if (pulseCircle) {
            pulseCircle.style.setProperty('--audio-level', 1 + audioLevel);
        }

        animationFrame = requestAnimationFrame(updateAudioLevel);
    };
    updateAudioLevel();
}
|
||||
|
||||
function showError(message) {
    // Flash a red toast with the given message for five seconds.
    const toast = document.getElementById('error-toast');
    toast.textContent = message;
    toast.className = 'toast error';
    toast.style.display = 'block';
    setTimeout(() => {
        toast.style.display = 'none';
    }, 5000);
}
|
||||
|
||||
function handleMessage(event) {
    // Data-channel traffic from the server: errors, input requests, and logs.
    const msg = JSON.parse(event.data);
    const typingIndicator = document.getElementById('typing-indicator');

    switch (msg.type) {
        case 'error':
            showError(msg.message);
            break;
        case 'send_input':
            // The server asks for the current chat history before replying.
            fetch('/input_hook', {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                },
                body: JSON.stringify({
                    webrtc_id: webrtc_id,
                    chatbot: messages,
                    state: messages
                })
            });
            break;
        case 'log':
            if (msg.data === 'pause_detected') {
                typingIndicator.style.display = 'block';
                chatMessages.scrollTop = chatMessages.scrollHeight;
            } else if (msg.data === 'response_starting') {
                typingIndicator.style.display = 'none';
            }
            break;
    }
}
|
||||
|
||||
async function setupWebRTC() {
    const config = __RTC_CONFIGURATION__;
    peerConnection = new RTCPeerConnection(config);

    // Warn (but keep trying) if negotiation takes longer than five seconds.
    const timeoutId = setTimeout(() => {
        const toast = document.getElementById('error-toast');
        toast.textContent = "Connection is taking longer than usual. Are you on a VPN?";
        toast.className = 'toast warning';
        toast.style.display = 'block';
        setTimeout(() => {
            toast.style.display = 'none';
        }, 5000);
    }, 5000);

    try {
        const stream = await navigator.mediaDevices.getUserMedia({
            audio: true
        });

        setupAudioVisualization(stream);

        for (const track of stream.getTracks()) {
            peerConnection.addTrack(track, stream);
        }

        const dataChannel = peerConnection.createDataChannel('text');
        dataChannel.onmessage = handleMessage;

        const offer = await peerConnection.createOffer();
        await peerConnection.setLocalDescription(offer);

        // Wait until ICE gathering finishes before posting the offer.
        await new Promise((resolve) => {
            if (peerConnection.iceGatheringState === "complete") {
                resolve();
                return;
            }
            const checkState = () => {
                if (peerConnection.iceGatheringState === "complete") {
                    peerConnection.removeEventListener("icegatheringstatechange", checkState);
                    resolve();
                }
            };
            peerConnection.addEventListener("icegatheringstatechange", checkState);
        });

        peerConnection.addEventListener('connectionstatechange', () => {
            console.log('connectionstatechange', peerConnection.connectionState);
            if (peerConnection.connectionState === 'connected') {
                clearTimeout(timeoutId);
                const toast = document.getElementById('error-toast');
                toast.style.display = 'none';
            }
            updateButtonState();
        });

        webrtc_id = Math.random().toString(36).substring(7);

        const response = await fetch('/webrtc/offer', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({
                sdp: peerConnection.localDescription.sdp,
                type: peerConnection.localDescription.type,
                webrtc_id: webrtc_id
            })
        });

        const serverResponse = await response.json();

        if (serverResponse.status === 'failed') {
            showError(serverResponse.meta.error === 'concurrency_limit_reached'
                ? `Too many connections. Maximum limit is ${serverResponse.meta.limit}`
                : serverResponse.meta.error);
            stop();
            return;
        }

        await peerConnection.setRemoteDescription(serverResponse);

        // Server-sent events carry chat updates back to the page.
        eventSource = new EventSource('/outputs?webrtc_id=' + webrtc_id);
        eventSource.addEventListener("output", (event) => {
            const eventJson = JSON.parse(event.data);
            console.log(eventJson);
            messages.push(eventJson.message);
            addMessage(eventJson.message.role, eventJson.audio ?? eventJson.message.content);
        });
    } catch (err) {
        clearTimeout(timeoutId);
        console.error('Error setting up WebRTC:', err);
        showError('Failed to establish connection. Please try again.');
        stop();
    }
}
|
||||
|
||||
function addMessage(role, content) {
    // Append a chat bubble: playable audio for user turns, text otherwise.
    const bubble = document.createElement('div');
    bubble.classList.add('message', role);

    if (role === 'user') {
        const audio = document.createElement('audio');
        audio.controls = true;
        audio.src = content;
        bubble.appendChild(audio);
    } else {
        bubble.textContent = content;
    }

    chatMessages.appendChild(bubble);
    chatMessages.scrollTop = chatMessages.scrollHeight;
}
|
||||
|
||||
function stop() {
    // Tear down SSE, audio analysis, and the peer connection, in that order.
    if (eventSource) {
        eventSource.close();
        eventSource = null;
    }

    if (animationFrame) {
        cancelAnimationFrame(animationFrame);
    }
    if (audioContext) {
        audioContext.close();
        audioContext = null;
        analyser = null;
        audioSource = null;
    }
    if (peerConnection) {
        if (peerConnection.getTransceivers) {
            for (const transceiver of peerConnection.getTransceivers()) {
                if (transceiver.stop) {
                    transceiver.stop();
                }
            }
        }

        if (peerConnection.getSenders) {
            for (const sender of peerConnection.getSenders()) {
                if (sender.track && sender.track.stop) sender.track.stop();
            }
        }
        peerConnection.close();
    }
    updateButtonState();
    audioLevel = 0;
}
|
||||
|
||||
startButton.addEventListener('click', () => {
    // One button toggles the conversation: stop when connected, start otherwise.
    const connected = peerConnection && peerConnection.connectionState === 'connected';
    if (connected) {
        stop();
    } else {
        setupWebRTC();
    }
});
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
4
demo/hello_computer/requirements.txt
Normal file
4
demo/hello_computer/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
fastrtc[stopword]
|
||||
python-dotenv
|
||||
openai
|
||||
twilio
|
||||
16
demo/llama_code_editor/README.md
Normal file
16
demo/llama_code_editor/README.md
Normal file
@@ -0,0 +1,16 @@
|
||||
---
|
||||
title: Llama Code Editor
|
||||
emoji: 🦙
|
||||
colorFrom: indigo
|
||||
colorTo: pink
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Create interactive HTML web pages with your voice
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN,
|
||||
secret|SAMBANOVA_API_KEY, secret|GROQ_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
45
demo/llama_code_editor/app.py
Normal file
45
demo/llama_code_editor/app.py
Normal file
@@ -0,0 +1,45 @@
|
||||
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from fastrtc import Stream
from gradio.utils import get_space

# Support running both as a package module and as a standalone script.
try:
    from demo.llama_code_editor.handler import CodeHandler
    from demo.llama_code_editor.ui import demo as ui
except (ImportError, ModuleNotFoundError):
    from handler import CodeHandler
    from ui import demo as ui


# Hosted deployments get concurrency/time limits; local runs are unbounded.
_on_space = get_space()

stream = Stream(
    handler=CodeHandler,
    modality="audio",
    mode="send-receive",
    concurrency_limit=10 if _on_space else None,
    time_limit=90 if _on_space else None,
)

# Replace the auto-generated UI with the custom Blocks app.
stream.ui = ui

app = FastAPI()


@app.get("/")
async def _():
    """Redirect to the mounted UI (absolute URL when hosted on Spaces)."""
    if get_space():
        url = "https://fastrtc-llama-code-editor.hf.space/ui/"
    else:
        url = "/ui"
    return RedirectResponse(url)


if __name__ == "__main__":
    import os

    mode = os.getenv("MODE")
    if mode == "UI":
        stream.ui.launch(server_port=7860, server_name="0.0.0.0")
    elif mode == "PHONE":
        stream.fastphone(host="0.0.0.0", port=7860)
    else:
        import uvicorn

        uvicorn.run(app, host="0.0.0.0", port=7860)
|
||||
37
demo/llama_code_editor/assets/sandbox.html
Normal file
37
demo/llama_code_editor/assets/sandbox.html
Normal file
@@ -0,0 +1,37 @@
|
||||
<div style="
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
min-height: 400px;
|
||||
background: linear-gradient(135deg, #f5f7fa 0%, #e4e8ec 100%);
|
||||
border-radius: 8px;
|
||||
border: 2px dashed #cbd5e1;
|
||||
padding: 2rem;
|
||||
text-align: center;
|
||||
color: #64748b;
|
||||
font-family: system-ui, -apple-system, sans-serif;
|
||||
">
|
||||
<div style="
|
||||
width: 80px;
|
||||
height: 80px;
|
||||
margin-bottom: 1.5rem;
|
||||
border: 3px solid #cbd5e1;
|
||||
border-radius: 12px;
|
||||
position: relative;
|
||||
">
|
||||
<div style="
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
left: 50%;
|
||||
transform: translate(-50%, -50%);
|
||||
font-size: 2rem;
|
||||
">📦</div>
|
||||
</div>
|
||||
<h2 style="
|
||||
margin: 0 0 0.5rem 0;
|
||||
font-size: 1.5rem;
|
||||
font-weight: 600;
|
||||
color: #475569;
|
||||
">No Application Created</h2>
|
||||
</div>
|
||||
60
demo/llama_code_editor/assets/spinner.html
Normal file
60
demo/llama_code_editor/assets/spinner.html
Normal file
@@ -0,0 +1,60 @@
|
||||
<div style="
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
min-height: 400px;
|
||||
background: linear-gradient(135deg, #f8fafc 0%, #f1f5f9 100%);
|
||||
border-radius: 8px;
|
||||
padding: 2rem;
|
||||
text-align: center;
|
||||
font-family: system-ui, -apple-system, sans-serif;
|
||||
">
|
||||
<!-- Spinner container -->
|
||||
<div style="
|
||||
position: relative;
|
||||
width: 64px;
|
||||
height: 64px;
|
||||
margin-bottom: 1.5rem;
|
||||
">
|
||||
<!-- Static ring -->
|
||||
<div style="
|
||||
position: absolute;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
border: 4px solid #e2e8f0;
|
||||
border-radius: 50%;
|
||||
"></div>
|
||||
<!-- Animated spinner -->
|
||||
<div style="
|
||||
position: absolute;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
border: 4px solid transparent;
|
||||
border-top-color: #3b82f6;
|
||||
border-radius: 50%;
|
||||
animation: spin 1s linear infinite;
|
||||
"></div>
|
||||
</div>
|
||||
|
||||
<!-- Text content -->
|
||||
<h2 style="
|
||||
margin: 0 0 0.5rem 0;
|
||||
font-size: 1.25rem;
|
||||
font-weight: 600;
|
||||
color: #475569;
|
||||
">Generating your application...</h2>
|
||||
|
||||
<p style="
|
||||
margin: 0;
|
||||
font-size: 0.875rem;
|
||||
color: #64748b;
|
||||
">This may take a few moments</p>
|
||||
|
||||
<style>
|
||||
@keyframes spin {
|
||||
0% { transform: rotate(0deg); }
|
||||
100% { transform: rotate(360deg); }
|
||||
}
|
||||
</style>
|
||||
</div>
|
||||
73
demo/llama_code_editor/handler.py
Normal file
73
demo/llama_code_editor/handler.py
Normal file
@@ -0,0 +1,73 @@
|
||||
import base64
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
import openai
|
||||
from dotenv import load_dotenv
|
||||
from fastrtc import (
|
||||
AdditionalOutputs,
|
||||
ReplyOnPause,
|
||||
audio_to_bytes,
|
||||
)
|
||||
from groq import Groq
|
||||
|
||||
load_dotenv()

# Groq handles speech-to-text; SambaNova's OpenAI-compatible API handles chat.
groq_client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

client = openai.OpenAI(
    api_key=os.environ.get("SAMBANOVA_API_KEY"),
    base_url="https://api.sambanova.ai/v1",
)

path = Path(__file__).parent / "assets"

# Shown in the sandbox tab while a new page is being generated.
spinner_html = open(path / "spinner.html").read()


system_prompt = "You are an AI coding assistant. Your task is to write single-file HTML applications based on a user's request. Only return the necessary code. Include all necessary imports and styles. You may also be asked to edit your original response."
user_prompt = "Please write a single-file HTML application to fulfill the following request.\nThe message:{user_message}\nCurrent code you have written:{code}"
|
||||
|
||||
|
||||
def extract_html_content(text):
    """Return the first ``<!DOCTYPE html> ... </html>`` span found in ``text``.

    The match is non-greedy and spans newlines (``re.DOTALL``); returns
    ``None`` when no complete HTML document is present.
    """
    found = re.search(r"<!DOCTYPE html>.*?</html>", text, re.DOTALL)
    if found is None:
        return None
    return found.group(0)
|
||||
|
||||
|
||||
def display_in_sandbox(code):
    """Wrap ``code`` in an iframe using a base64 ``data:`` URI for preview."""
    payload = base64.b64encode(code.encode("utf-8")).decode("utf-8")
    src = f"data:text/html;charset=utf-8;base64,{payload}"
    return f'<iframe src="{src}" width="100%" height="600px"></iframe>'
|
||||
|
||||
|
||||
def generate(user_message: tuple[int, np.ndarray], history: list[dict], code: str):
    """Turn a spoken request into a single-file HTML application.

    Yields twice: first ``(history, spinner_html)`` so the UI shows progress,
    then ``(history, html)`` once the LLM reply has been parsed.
    """
    # Show the spinner immediately while transcription + generation run.
    yield AdditionalOutputs(history, spinner_html)

    transcription = groq_client.audio.transcriptions.create(
        file=("audio-file.mp3", audio_to_bytes(user_message)),
        model="whisper-large-v3-turbo",
        response_format="verbose_json",
    )
    text = transcription.text

    history.append(
        {"role": "user", "content": user_prompt.format(user_message=text, code=code)}
    )

    completion = client.chat.completions.create(
        model="Meta-Llama-3.1-70B-Instruct",
        messages=history,  # type: ignore
        temperature=0.1,
        top_p=0.1,
    )

    reply = completion.choices[0].message.content
    history.append({"role": "assistant", "content": reply})
    yield AdditionalOutputs(history, extract_html_content(reply))


CodeHandler = ReplyOnPause(generate)  # type: ignore
|
||||
5
demo/llama_code_editor/requirements.in
Normal file
5
demo/llama_code_editor/requirements.in
Normal file
@@ -0,0 +1,5 @@
|
||||
fastrtc[vad]
|
||||
groq
|
||||
openai
|
||||
python-dotenv
|
||||
twilio
|
||||
295
demo/llama_code_editor/requirements.txt
Normal file
295
demo/llama_code_editor/requirements.txt
Normal file
@@ -0,0 +1,295 @@
|
||||
# This file was autogenerated by uv via the following command:
|
||||
# uv pip compile demo/llama_code_editor/requirements.in -o demo/llama_code_editor/requirements.txt
|
||||
aiofiles==23.2.1
|
||||
# via gradio
|
||||
aiohappyeyeballs==2.4.6
|
||||
# via aiohttp
|
||||
aiohttp==3.11.12
|
||||
# via
|
||||
# aiohttp-retry
|
||||
# twilio
|
||||
aiohttp-retry==2.9.1
|
||||
# via twilio
|
||||
aioice==0.9.0
|
||||
# via aiortc
|
||||
aiortc==1.10.1
|
||||
# via fastrtc
|
||||
aiosignal==1.3.2
|
||||
# via aiohttp
|
||||
annotated-types==0.7.0
|
||||
# via pydantic
|
||||
anyio==4.6.2.post1
|
||||
# via
|
||||
# gradio
|
||||
# groq
|
||||
# httpx
|
||||
# openai
|
||||
# starlette
|
||||
attrs==25.1.0
|
||||
# via aiohttp
|
||||
audioread==3.0.1
|
||||
# via librosa
|
||||
av==12.3.0
|
||||
# via aiortc
|
||||
certifi==2024.8.30
|
||||
# via
|
||||
# httpcore
|
||||
# httpx
|
||||
# requests
|
||||
cffi==1.17.1
|
||||
# via
|
||||
# aiortc
|
||||
# cryptography
|
||||
# pylibsrtp
|
||||
# soundfile
|
||||
charset-normalizer==3.4.0
|
||||
# via requests
|
||||
click==8.1.7
|
||||
# via
|
||||
# typer
|
||||
# uvicorn
|
||||
coloredlogs==15.0.1
|
||||
# via onnxruntime
|
||||
cryptography==43.0.3
|
||||
# via
|
||||
# aiortc
|
||||
# pyopenssl
|
||||
decorator==5.1.1
|
||||
# via librosa
|
||||
distro==1.9.0
|
||||
# via
|
||||
# groq
|
||||
# openai
|
||||
dnspython==2.7.0
|
||||
# via aioice
|
||||
fastapi==0.115.5
|
||||
# via gradio
|
||||
fastrtc==0.0.2.post4
|
||||
# via -r demo/llama_code_editor/requirements.in
|
||||
ffmpy==0.4.0
|
||||
# via gradio
|
||||
filelock==3.16.1
|
||||
# via huggingface-hub
|
||||
flatbuffers==24.3.25
|
||||
# via onnxruntime
|
||||
frozenlist==1.5.0
|
||||
# via
|
||||
# aiohttp
|
||||
# aiosignal
|
||||
fsspec==2024.10.0
|
||||
# via
|
||||
# gradio-client
|
||||
# huggingface-hub
|
||||
google-crc32c==1.6.0
|
||||
# via aiortc
|
||||
gradio==5.16.0
|
||||
# via fastrtc
|
||||
gradio-client==1.7.0
|
||||
# via gradio
|
||||
groq==0.18.0
|
||||
# via -r demo/llama_code_editor/requirements.in
|
||||
h11==0.14.0
|
||||
# via
|
||||
# httpcore
|
||||
# uvicorn
|
||||
httpcore==1.0.7
|
||||
# via httpx
|
||||
httpx==0.27.2
|
||||
# via
|
||||
# gradio
|
||||
# gradio-client
|
||||
# groq
|
||||
# openai
|
||||
# safehttpx
|
||||
huggingface-hub==0.28.1
|
||||
# via
|
||||
# gradio
|
||||
# gradio-client
|
||||
humanfriendly==10.0
|
||||
# via coloredlogs
|
||||
idna==3.10
|
||||
# via
|
||||
# anyio
|
||||
# httpx
|
||||
# requests
|
||||
# yarl
|
||||
ifaddr==0.2.0
|
||||
# via aioice
|
||||
jinja2==3.1.4
|
||||
# via gradio
|
||||
jiter==0.7.1
|
||||
# via openai
|
||||
joblib==1.4.2
|
||||
# via
|
||||
# librosa
|
||||
# scikit-learn
|
||||
lazy-loader==0.4
|
||||
# via librosa
|
||||
librosa==0.10.2.post1
|
||||
# via fastrtc
|
||||
llvmlite==0.43.0
|
||||
# via numba
|
||||
markdown-it-py==3.0.0
|
||||
# via rich
|
||||
markupsafe==2.1.5
|
||||
# via
|
||||
# gradio
|
||||
# jinja2
|
||||
mdurl==0.1.2
|
||||
# via markdown-it-py
|
||||
mpmath==1.3.0
|
||||
# via sympy
|
||||
msgpack==1.1.0
|
||||
# via librosa
|
||||
multidict==6.1.0
|
||||
# via
|
||||
# aiohttp
|
||||
# yarl
|
||||
numba==0.60.0
|
||||
# via librosa
|
||||
numpy==2.0.2
|
||||
# via
|
||||
# gradio
|
||||
# librosa
|
||||
# numba
|
||||
# onnxruntime
|
||||
# pandas
|
||||
# scikit-learn
|
||||
# scipy
|
||||
# soxr
|
||||
onnxruntime==1.20.1
|
||||
# via fastrtc
|
||||
openai==1.54.4
|
||||
# via -r demo/llama_code_editor/requirements.in
|
||||
orjson==3.10.11
|
||||
# via gradio
|
||||
packaging==24.2
|
||||
# via
|
||||
# gradio
|
||||
# gradio-client
|
||||
# huggingface-hub
|
||||
# lazy-loader
|
||||
# onnxruntime
|
||||
# pooch
|
||||
pandas==2.2.3
|
||||
# via gradio
|
||||
pillow==11.0.0
|
||||
# via gradio
|
||||
platformdirs==4.3.6
|
||||
# via pooch
|
||||
pooch==1.8.2
|
||||
# via librosa
|
||||
propcache==0.2.1
|
||||
# via
|
||||
# aiohttp
|
||||
# yarl
|
||||
protobuf==5.28.3
|
||||
# via onnxruntime
|
||||
pycparser==2.22
|
||||
# via cffi
|
||||
pydantic==2.9.2
|
||||
# via
|
||||
# fastapi
|
||||
# gradio
|
||||
# groq
|
||||
# openai
|
||||
pydantic-core==2.23.4
|
||||
# via pydantic
|
||||
pydub==0.25.1
|
||||
# via gradio
|
||||
pyee==12.1.1
|
||||
# via aiortc
|
||||
pygments==2.18.0
|
||||
# via rich
|
||||
pyjwt==2.10.1
|
||||
# via twilio
|
||||
pylibsrtp==0.10.0
|
||||
# via aiortc
|
||||
pyopenssl==24.2.1
|
||||
# via aiortc
|
||||
python-dateutil==2.9.0.post0
|
||||
# via pandas
|
||||
python-dotenv==1.0.1
|
||||
# via -r demo/llama_code_editor/requirements.in
|
||||
python-multipart==0.0.20
|
||||
# via gradio
|
||||
pytz==2024.2
|
||||
# via pandas
|
||||
pyyaml==6.0.2
|
||||
# via
|
||||
# gradio
|
||||
# huggingface-hub
|
||||
requests==2.32.3
|
||||
# via
|
||||
# huggingface-hub
|
||||
# pooch
|
||||
# twilio
|
||||
rich==13.9.4
|
||||
# via typer
|
||||
ruff==0.9.6
|
||||
# via gradio
|
||||
safehttpx==0.1.6
|
||||
# via gradio
|
||||
scikit-learn==1.5.2
|
||||
# via librosa
|
||||
scipy==1.14.1
|
||||
# via
|
||||
# librosa
|
||||
# scikit-learn
|
||||
semantic-version==2.10.0
|
||||
# via gradio
|
||||
shellingham==1.5.4
|
||||
# via typer
|
||||
six==1.16.0
|
||||
# via python-dateutil
|
||||
sniffio==1.3.1
|
||||
# via
|
||||
# anyio
|
||||
# groq
|
||||
# httpx
|
||||
# openai
|
||||
soundfile==0.12.1
|
||||
# via librosa
|
||||
soxr==0.5.0.post1
|
||||
# via librosa
|
||||
starlette==0.41.3
|
||||
# via
|
||||
# fastapi
|
||||
# gradio
|
||||
sympy==1.13.3
|
||||
# via onnxruntime
|
||||
threadpoolctl==3.5.0
|
||||
# via scikit-learn
|
||||
tomlkit==0.12.0
|
||||
# via gradio
|
||||
tqdm==4.67.0
|
||||
# via
|
||||
# huggingface-hub
|
||||
# openai
|
||||
twilio==9.4.5
|
||||
# via -r demo/llama_code_editor/requirements.in
|
||||
typer==0.13.1
|
||||
# via gradio
|
||||
typing-extensions==4.12.2
|
||||
# via
|
||||
# fastapi
|
||||
# gradio
|
||||
# gradio-client
|
||||
# groq
|
||||
# huggingface-hub
|
||||
# librosa
|
||||
# openai
|
||||
# pydantic
|
||||
# pydantic-core
|
||||
# pyee
|
||||
# typer
|
||||
tzdata==2024.2
|
||||
# via pandas
|
||||
urllib3==2.2.3
|
||||
# via requests
|
||||
uvicorn==0.32.0
|
||||
# via gradio
|
||||
websockets==12.0
|
||||
# via gradio-client
|
||||
yarl==1.18.3
|
||||
# via aiohttp
|
||||
75
demo/llama_code_editor/ui.py
Normal file
75
demo/llama_code_editor/ui.py
Normal file
@@ -0,0 +1,75 @@
|
||||
from pathlib import Path
|
||||
|
||||
import gradio as gr
|
||||
from dotenv import load_dotenv
|
||||
from fastrtc import WebRTC, get_twilio_turn_credentials
|
||||
from gradio.utils import get_space
|
||||
|
||||
try:
|
||||
from demo.llama_code_editor.handler import (
|
||||
CodeHandler,
|
||||
display_in_sandbox,
|
||||
system_prompt,
|
||||
)
|
||||
except (ImportError, ModuleNotFoundError):
|
||||
from handler import CodeHandler, display_in_sandbox, system_prompt
|
||||
|
||||
load_dotenv()
|
||||
|
||||
path = Path(__file__).parent / "assets"
|
||||
|
||||
with gr.Blocks(css=".code-component {max-height: 500px !important}") as demo:
|
||||
history = gr.State([{"role": "system", "content": system_prompt}])
|
||||
with gr.Row():
|
||||
with gr.Column(scale=1):
|
||||
gr.HTML(
|
||||
"""
|
||||
<h1 style='text-align: center'>
|
||||
Llama Code Editor
|
||||
</h1>
|
||||
<h2 style='text-align: center'>
|
||||
Powered by SambaNova and Gradio-WebRTC ⚡️
|
||||
</h2>
|
||||
<p style='text-align: center'>
|
||||
Create and edit single-file HTML applications with just your voice!
|
||||
</p>
|
||||
<p style='text-align: center'>
|
||||
Each conversation is limited to 90 seconds. Once the time limit is up you can rejoin the conversation.
|
||||
</p>
|
||||
"""
|
||||
)
|
||||
webrtc = WebRTC(
|
||||
rtc_configuration=get_twilio_turn_credentials()
|
||||
if get_space()
|
||||
else None,
|
||||
mode="send",
|
||||
modality="audio",
|
||||
)
|
||||
with gr.Column(scale=10):
|
||||
with gr.Tabs():
|
||||
with gr.Tab("Sandbox"):
|
||||
sandbox = gr.HTML(value=open(path / "sandbox.html").read())
|
||||
with gr.Tab("Code"):
|
||||
code = gr.Code(
|
||||
language="html",
|
||||
max_lines=50,
|
||||
interactive=False,
|
||||
elem_classes="code-component",
|
||||
)
|
||||
with gr.Tab("Chat"):
|
||||
cb = gr.Chatbot(type="messages")
|
||||
|
||||
webrtc.stream(
|
||||
CodeHandler,
|
||||
inputs=[webrtc, history, code],
|
||||
outputs=[webrtc],
|
||||
time_limit=90 if get_space() else None,
|
||||
concurrency_limit=10 if get_space() else None,
|
||||
)
|
||||
webrtc.on_additional_outputs(
|
||||
lambda history, code: (history, code, history), outputs=[history, code, cb]
|
||||
)
|
||||
code.change(display_in_sandbox, code, sandbox, queue=False)
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
15
demo/llm_voice_chat/README.md
Normal file
15
demo/llm_voice_chat/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: LLM Voice Chat
|
||||
emoji: 💻
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Talk to an LLM with ElevenLabs
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|GROQ_API_KEY, secret|ELEVENLABS_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
15
demo/llm_voice_chat/README_gradio.md
Normal file
15
demo/llm_voice_chat/README_gradio.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: LLM Voice Chat (Gradio)
|
||||
emoji: 💻
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: LLM Voice by ElevenLabs (Gradio)
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|GROQ_API_KEY, secret|ELEVENLABS_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
101
demo/llm_voice_chat/app.py
Normal file
101
demo/llm_voice_chat/app.py
Normal file
@@ -0,0 +1,101 @@
|
||||
import os
|
||||
import time
|
||||
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
from dotenv import load_dotenv
|
||||
from elevenlabs import ElevenLabs
|
||||
from fastapi import FastAPI
|
||||
from fastrtc import (
|
||||
AdditionalOutputs,
|
||||
ReplyOnPause,
|
||||
Stream,
|
||||
WebRTCError,
|
||||
get_stt_model,
|
||||
get_twilio_turn_credentials,
|
||||
)
|
||||
from gradio.utils import get_space
|
||||
from groq import Groq
|
||||
from numpy.typing import NDArray
|
||||
|
||||
load_dotenv()
|
||||
groq_client = Groq()
|
||||
tts_client = ElevenLabs(api_key=os.getenv("ELEVENLABS_API_KEY"))
|
||||
stt_model = get_stt_model()
|
||||
|
||||
|
||||
# See "Talk to Claude" in Cookbook for an example of how to keep
|
||||
# track of the chat history.
|
||||
def response(
|
||||
audio: tuple[int, NDArray[np.int16 | np.float32]],
|
||||
chatbot: list[dict] | None = None,
|
||||
):
|
||||
try:
|
||||
chatbot = chatbot or []
|
||||
messages = [{"role": d["role"], "content": d["content"]} for d in chatbot]
|
||||
start = time.time()
|
||||
text = stt_model.stt(audio)
|
||||
print("transcription", time.time() - start)
|
||||
print("prompt", text)
|
||||
chatbot.append({"role": "user", "content": text})
|
||||
yield AdditionalOutputs(chatbot)
|
||||
messages.append({"role": "user", "content": text})
|
||||
response_text = (
|
||||
groq_client.chat.completions.create(
|
||||
model="llama-3.1-8b-instant",
|
||||
max_tokens=512,
|
||||
messages=messages, # type: ignore
|
||||
)
|
||||
.choices[0]
|
||||
.message.content
|
||||
)
|
||||
|
||||
chatbot.append({"role": "assistant", "content": response_text})
|
||||
|
||||
for chunk in tts_client.text_to_speech.convert_as_stream(
|
||||
text=response_text, # type: ignore
|
||||
voice_id="JBFqnCBsd6RMkjVDRZzb",
|
||||
model_id="eleven_multilingual_v2",
|
||||
output_format="pcm_24000",
|
||||
):
|
||||
audio_array = np.frombuffer(chunk, dtype=np.int16).reshape(1, -1)
|
||||
yield (24000, audio_array)
|
||||
yield AdditionalOutputs(chatbot)
|
||||
except Exception:
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
raise WebRTCError(traceback.format_exc())
|
||||
|
||||
|
||||
chatbot = gr.Chatbot(type="messages")
|
||||
stream = Stream(
|
||||
modality="audio",
|
||||
mode="send-receive",
|
||||
handler=ReplyOnPause(response, input_sample_rate=16000),
|
||||
additional_outputs_handler=lambda a, b: b,
|
||||
additional_inputs=[chatbot],
|
||||
additional_outputs=[chatbot],
|
||||
rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
|
||||
concurrency_limit=5 if get_space() else None,
|
||||
time_limit=90 if get_space() else None,
|
||||
ui_args={"title": "LLM Voice Chat (Powered by Groq, ElevenLabs, and WebRTC ⚡️)"},
|
||||
)
|
||||
|
||||
# Mount the STREAM UI to the FastAPI app
|
||||
# Because I don't want to build the UI manually
|
||||
app = FastAPI()
|
||||
app = gr.mount_gradio_app(app, stream.ui, path="/")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import os
|
||||
|
||||
os.environ["GRADIO_SSR_MODE"] = "false"
|
||||
|
||||
if (mode := os.getenv("MODE")) == "UI":
|
||||
stream.ui.launch(server_port=7860)
|
||||
elif mode == "PHONE":
|
||||
stream.fastphone(host="0.0.0.0", port=7860)
|
||||
else:
|
||||
stream.ui.launch(server_port=7860)
|
||||
6
demo/llm_voice_chat/requirements.txt
Normal file
6
demo/llm_voice_chat/requirements.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
fastrtc[stopword]
|
||||
python-dotenv
|
||||
openai
|
||||
twilio
|
||||
groq
|
||||
elevenlabs
|
||||
15
demo/object_detection/README.md
Normal file
15
demo/object_detection/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Object Detection
|
||||
emoji: 📸
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Use YOLOv10 to detect objects in real-time
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
83
demo/object_detection/app.py
Normal file
83
demo/object_detection/app.py
Normal file
@@ -0,0 +1,83 @@
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
import cv2
|
||||
import gradio as gr
|
||||
from fastapi import FastAPI
|
||||
from fastapi.responses import HTMLResponse
|
||||
from fastrtc import Stream, WebRTCError, get_twilio_turn_credentials
|
||||
from gradio.utils import get_space
|
||||
from huggingface_hub import hf_hub_download
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
try:
|
||||
from demo.object_detection.inference import YOLOv10
|
||||
except (ImportError, ModuleNotFoundError):
|
||||
from inference import YOLOv10
|
||||
|
||||
|
||||
cur_dir = Path(__file__).parent
|
||||
|
||||
model_file = hf_hub_download(
|
||||
repo_id="onnx-community/yolov10n", filename="onnx/model.onnx"
|
||||
)
|
||||
|
||||
model = YOLOv10(model_file)
|
||||
|
||||
|
||||
def detection(image, conf_threshold=0.3):
|
||||
try:
|
||||
image = cv2.resize(image, (model.input_width, model.input_height))
|
||||
print("conf_threshold", conf_threshold)
|
||||
new_image = model.detect_objects(image, conf_threshold)
|
||||
return cv2.resize(new_image, (500, 500))
|
||||
except Exception as e:
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
raise WebRTCError(str(e))
|
||||
|
||||
|
||||
stream = Stream(
|
||||
handler=detection,
|
||||
modality="video",
|
||||
mode="send-receive",
|
||||
additional_inputs=[gr.Slider(minimum=0, maximum=1, step=0.01, value=0.3)],
|
||||
rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
|
||||
concurrency_limit=2 if get_space() else None,
|
||||
)
|
||||
|
||||
app = FastAPI()
|
||||
|
||||
stream.mount(app)
|
||||
|
||||
|
||||
@app.get("/")
|
||||
async def _():
|
||||
rtc_config = get_twilio_turn_credentials() if get_space() else None
|
||||
html_content = open(cur_dir / "index.html").read()
|
||||
html_content = html_content.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config))
|
||||
return HTMLResponse(content=html_content)
|
||||
|
||||
|
||||
class InputData(BaseModel):
|
||||
webrtc_id: str
|
||||
conf_threshold: float = Field(ge=0, le=1)
|
||||
|
||||
|
||||
@app.post("/input_hook")
|
||||
async def _(data: InputData):
|
||||
stream.set_input(data.webrtc_id, data.conf_threshold)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import os
|
||||
|
||||
if (mode := os.getenv("MODE")) == "UI":
|
||||
stream.ui.launch(server_port=7860)
|
||||
elif mode == "PHONE":
|
||||
stream.fastphone(host="0.0.0.0", port=7860)
|
||||
else:
|
||||
import uvicorn
|
||||
|
||||
uvicorn.run(app, host="0.0.0.0", port=7860)
|
||||
340
demo/object_detection/index.html
Normal file
340
demo/object_detection/index.html
Normal file
@@ -0,0 +1,340 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Object Detection</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: system-ui, -apple-system, sans-serif;
|
||||
background: linear-gradient(135deg, #2d2b52 0%, #191731 100%);
|
||||
color: white;
|
||||
margin: 0;
|
||||
padding: 20px;
|
||||
height: 100vh;
|
||||
box-sizing: border-box;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
}
|
||||
|
||||
.container {
|
||||
width: 100%;
|
||||
max-width: 800px;
|
||||
text-align: center;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.video-container {
|
||||
width: 100%;
|
||||
max-width: 500px;
|
||||
aspect-ratio: 1/1;
|
||||
background: rgba(255, 255, 255, 0.1);
|
||||
border-radius: 12px;
|
||||
overflow: hidden;
|
||||
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.2);
|
||||
margin: 10px 0;
|
||||
}
|
||||
|
||||
#video-output {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
object-fit: cover;
|
||||
}
|
||||
|
||||
button {
|
||||
background: white;
|
||||
color: #2d2b52;
|
||||
border: none;
|
||||
padding: 12px 32px;
|
||||
border-radius: 24px;
|
||||
font-size: 16px;
|
||||
font-weight: 600;
|
||||
cursor: pointer;
|
||||
transition: all 0.3s ease;
|
||||
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
|
||||
}
|
||||
|
||||
button:hover {
|
||||
transform: translateY(-2px);
|
||||
box-shadow: 0 6px 16px rgba(0, 0, 0, 0.2);
|
||||
}
|
||||
|
||||
h1 {
|
||||
font-size: 2.5em;
|
||||
margin-bottom: 0.3em;
|
||||
}
|
||||
|
||||
p {
|
||||
color: rgba(255, 255, 255, 0.8);
|
||||
margin-bottom: 1em;
|
||||
}
|
||||
|
||||
.controls {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 12px;
|
||||
align-items: center;
|
||||
margin-top: 10px;
|
||||
}
|
||||
|
||||
.slider-container {
|
||||
width: 100%;
|
||||
max-width: 300px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 8px;
|
||||
}
|
||||
|
||||
.slider-container label {
|
||||
color: rgba(255, 255, 255, 0.8);
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
input[type="range"] {
|
||||
width: 100%;
|
||||
height: 6px;
|
||||
-webkit-appearance: none;
|
||||
background: rgba(255, 255, 255, 0.1);
|
||||
border-radius: 3px;
|
||||
outline: none;
|
||||
}
|
||||
|
||||
input[type="range"]::-webkit-slider-thumb {
|
||||
-webkit-appearance: none;
|
||||
width: 18px;
|
||||
height: 18px;
|
||||
background: white;
|
||||
border-radius: 50%;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
/* Add styles for toast notifications */
|
||||
.toast {
|
||||
position: fixed;
|
||||
top: 20px;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
padding: 16px 24px;
|
||||
border-radius: 4px;
|
||||
font-size: 14px;
|
||||
z-index: 1000;
|
||||
display: none;
|
||||
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
|
||||
}
|
||||
|
||||
.toast.error {
|
||||
background-color: #f44336;
|
||||
color: white;
|
||||
}
|
||||
|
||||
.toast.warning {
|
||||
background-color: #ffd700;
|
||||
color: black;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<!-- Add toast element after body opening tag -->
|
||||
<div id="error-toast" class="toast"></div>
|
||||
<div class="container">
|
||||
<h1>Real-time Object Detection</h1>
|
||||
<p>Using YOLOv10 to detect objects in your webcam feed</p>
|
||||
<div class="video-container">
|
||||
<video id="video-output" autoplay playsinline></video>
|
||||
</div>
|
||||
<div class="controls">
|
||||
<div class="slider-container">
|
||||
<label>Confidence Threshold: <span id="conf-value">0.3</span></label>
|
||||
<input type="range" id="conf-threshold" min="0" max="1" step="0.01" value="0.3">
|
||||
</div>
|
||||
<button id="start-button">Start</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
let peerConnection;
|
||||
let webrtc_id;
|
||||
const startButton = document.getElementById('start-button');
|
||||
const videoOutput = document.getElementById('video-output');
|
||||
const confThreshold = document.getElementById('conf-threshold');
|
||||
const confValue = document.getElementById('conf-value');
|
||||
|
||||
// Update confidence value display
|
||||
confThreshold.addEventListener('input', (e) => {
|
||||
confValue.textContent = e.target.value;
|
||||
if (peerConnection) {
|
||||
updateConfThreshold(e.target.value);
|
||||
}
|
||||
});
|
||||
|
||||
function updateConfThreshold(value) {
|
||||
fetch('/input_hook', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
webrtc_id: webrtc_id,
|
||||
conf_threshold: parseFloat(value)
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
function showError(message) {
|
||||
const toast = document.getElementById('error-toast');
|
||||
toast.textContent = message;
|
||||
toast.className = 'toast error';
|
||||
toast.style.display = 'block';
|
||||
|
||||
// Hide toast after 5 seconds
|
||||
setTimeout(() => {
|
||||
toast.style.display = 'none';
|
||||
}, 5000);
|
||||
}
|
||||
|
||||
async function setupWebRTC() {
|
||||
const config = __RTC_CONFIGURATION__;
|
||||
peerConnection = new RTCPeerConnection(config);
|
||||
|
||||
const timeoutId = setTimeout(() => {
|
||||
const toast = document.getElementById('error-toast');
|
||||
toast.textContent = "Connection is taking longer than usual. Are you on a VPN?";
|
||||
toast.className = 'toast warning';
|
||||
toast.style.display = 'block';
|
||||
|
||||
// Hide warning after 5 seconds
|
||||
setTimeout(() => {
|
||||
toast.style.display = 'none';
|
||||
}, 5000);
|
||||
}, 5000);
|
||||
|
||||
try {
|
||||
const stream = await navigator.mediaDevices.getUserMedia({
|
||||
video: true
|
||||
});
|
||||
|
||||
stream.getTracks().forEach(track => {
|
||||
peerConnection.addTrack(track, stream);
|
||||
});
|
||||
|
||||
peerConnection.addEventListener('track', (evt) => {
|
||||
if (videoOutput && videoOutput.srcObject !== evt.streams[0]) {
|
||||
videoOutput.srcObject = evt.streams[0];
|
||||
}
|
||||
});
|
||||
|
||||
const dataChannel = peerConnection.createDataChannel('text');
|
||||
dataChannel.onmessage = (event) => {
|
||||
const eventJson = JSON.parse(event.data);
|
||||
if (eventJson.type === "error") {
|
||||
showError(eventJson.message);
|
||||
} else if (eventJson.type === "send_input") {
|
||||
updateConfThreshold(confThreshold.value);
|
||||
}
|
||||
};
|
||||
|
||||
const offer = await peerConnection.createOffer();
|
||||
await peerConnection.setLocalDescription(offer);
|
||||
|
||||
await new Promise((resolve) => {
|
||||
if (peerConnection.iceGatheringState === "complete") {
|
||||
resolve();
|
||||
} else {
|
||||
const checkState = () => {
|
||||
if (peerConnection.iceGatheringState === "complete") {
|
||||
peerConnection.removeEventListener("icegatheringstatechange", checkState);
|
||||
resolve();
|
||||
}
|
||||
};
|
||||
peerConnection.addEventListener("icegatheringstatechange", checkState);
|
||||
}
|
||||
});
|
||||
|
||||
webrtc_id = Math.random().toString(36).substring(7);
|
||||
|
||||
const response = await fetch('/webrtc/offer', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
sdp: peerConnection.localDescription.sdp,
|
||||
type: peerConnection.localDescription.type,
|
||||
webrtc_id: webrtc_id
|
||||
})
|
||||
});
|
||||
|
||||
const serverResponse = await response.json();
|
||||
|
||||
if (serverResponse.status === 'failed') {
|
||||
showError(serverResponse.meta.error === 'concurrency_limit_reached'
|
||||
? `Too many connections. Maximum limit is ${serverResponse.meta.limit}`
|
||||
: serverResponse.meta.error);
|
||||
stop();
|
||||
startButton.textContent = 'Start';
|
||||
return;
|
||||
}
|
||||
|
||||
await peerConnection.setRemoteDescription(serverResponse);
|
||||
|
||||
// Send initial confidence threshold
|
||||
updateConfThreshold(confThreshold.value);
|
||||
|
||||
peerConnection.addEventListener('connectionstatechange', () => {
|
||||
if (peerConnection.connectionState === 'connected') {
|
||||
clearTimeout(timeoutId);
|
||||
const toast = document.getElementById('error-toast');
|
||||
toast.style.display = 'none';
|
||||
}
|
||||
});
|
||||
|
||||
} catch (err) {
|
||||
clearTimeout(timeoutId);
|
||||
console.error('Error setting up WebRTC:', err);
|
||||
showError('Failed to establish connection. Please try again.');
|
||||
stop();
|
||||
startButton.textContent = 'Start';
|
||||
}
|
||||
}
|
||||
|
||||
function stop() {
|
||||
if (peerConnection) {
|
||||
if (peerConnection.getTransceivers) {
|
||||
peerConnection.getTransceivers().forEach(transceiver => {
|
||||
if (transceiver.stop) {
|
||||
transceiver.stop();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (peerConnection.getSenders) {
|
||||
peerConnection.getSenders().forEach(sender => {
|
||||
if (sender.track && sender.track.stop) sender.track.stop();
|
||||
});
|
||||
}
|
||||
|
||||
setTimeout(() => {
|
||||
peerConnection.close();
|
||||
}, 500);
|
||||
}
|
||||
|
||||
videoOutput.srcObject = null;
|
||||
}
|
||||
|
||||
startButton.addEventListener('click', () => {
|
||||
if (startButton.textContent === 'Start') {
|
||||
setupWebRTC();
|
||||
startButton.textContent = 'Stop';
|
||||
} else {
|
||||
stop();
|
||||
startButton.textContent = 'Start';
|
||||
}
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
@@ -3,7 +3,11 @@ import time
|
||||
import cv2
|
||||
import numpy as np
|
||||
import onnxruntime
|
||||
from utils import draw_detections
|
||||
|
||||
try:
|
||||
from demo.object_detection.utils import draw_detections
|
||||
except (ImportError, ModuleNotFoundError):
|
||||
from utils import draw_detections
|
||||
|
||||
|
||||
class YOLOv10:
|
||||
@@ -51,7 +55,7 @@ class YOLOv10:
|
||||
self.output_names, {self.input_names[0]: input_tensor}
|
||||
)
|
||||
|
||||
print(f"Inference time: {(time.perf_counter() - start)*1000:.2f} ms")
|
||||
print(f"Inference time: {(time.perf_counter() - start) * 1000:.2f} ms")
|
||||
(
|
||||
boxes,
|
||||
scores,
|
||||
@@ -71,7 +75,7 @@ class YOLOv10:
|
||||
return [], [], []
|
||||
|
||||
# Get the class with the highest confidence
|
||||
class_ids = np.argmax(predictions[:, 4:], axis=1)
|
||||
class_ids = predictions[:, 5].astype(int)
|
||||
|
||||
# Get bounding boxes for each object
|
||||
boxes = self.extract_boxes(predictions)
|
||||
4
demo/object_detection/requirements.txt
Normal file
4
demo/object_detection/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
fastrtc
|
||||
opencv-python
|
||||
twilio
|
||||
onnxruntime-gpu
|
||||
@@ -170,11 +170,11 @@ def draw_detections(image, boxes, scores, class_ids, mask_alpha=0.3):
|
||||
for class_id, box, score in zip(class_ids, boxes, scores):
|
||||
color = colors[class_id]
|
||||
|
||||
draw_box(det_img, box, color)
|
||||
draw_box(det_img, box, color) # type: ignore
|
||||
|
||||
label = class_names[class_id]
|
||||
caption = f"{label} {int(score * 100)}%"
|
||||
draw_text(det_img, caption, box, color, font_size, text_thickness)
|
||||
draw_text(det_img, caption, box, color, font_size, text_thickness) # type: ignore
|
||||
|
||||
return det_img
|
||||
|
||||
@@ -232,6 +232,6 @@ def draw_masks(
|
||||
x1, y1, x2, y2 = box.astype(int)
|
||||
|
||||
# Draw fill rectangle in mask image
|
||||
cv2.rectangle(mask_img, (x1, y1), (x2, y2), color, -1)
|
||||
cv2.rectangle(mask_img, (x1, y1), (x2, y2), color, -1) # type: ignore
|
||||
|
||||
return cv2.addWeighted(mask_img, mask_alpha, image, 1 - mask_alpha, 0)
|
||||
@@ -1,74 +0,0 @@
|
||||
import os
|
||||
|
||||
import cv2
|
||||
import gradio as gr
|
||||
from gradio_webrtc import WebRTC
|
||||
from huggingface_hub import hf_hub_download
|
||||
from inference import YOLOv10
|
||||
from twilio.rest import Client
|
||||
|
||||
model_file = hf_hub_download(
|
||||
repo_id="onnx-community/yolov10n", filename="onnx/model.onnx"
|
||||
)
|
||||
|
||||
model = YOLOv10(model_file)
|
||||
|
||||
account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
|
||||
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
|
||||
|
||||
if account_sid and auth_token:
|
||||
client = Client(account_sid, auth_token)
|
||||
|
||||
token = client.tokens.create()
|
||||
|
||||
rtc_configuration = {
|
||||
"iceServers": token.ice_servers,
|
||||
"iceTransportPolicy": "relay",
|
||||
}
|
||||
else:
|
||||
rtc_configuration = None
|
||||
|
||||
|
||||
def detection(frame, conf_threshold=0.3):
|
||||
frame = cv2.flip(frame, 0)
|
||||
return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
||||
|
||||
|
||||
css = """.my-group {max-width: 600px !important; max-height: 600 !important;}
|
||||
.my-column {display: flex !important; justify-content: center !important; align-items: center !important};"""
|
||||
|
||||
|
||||
with gr.Blocks(css=css) as demo:
|
||||
gr.HTML(
|
||||
"""
|
||||
<h1 style='text-align: center'>
|
||||
YOLOv10 Webcam Stream (Powered by WebRTC ⚡️)
|
||||
</h1>
|
||||
"""
|
||||
)
|
||||
gr.HTML(
|
||||
"""
|
||||
<h3 style='text-align: center'>
|
||||
<a href='https://arxiv.org/abs/2405.14458' target='_blank'>arXiv</a> | <a href='https://github.com/THU-MIG/yolov10' target='_blank'>github</a>
|
||||
</h3>
|
||||
"""
|
||||
)
|
||||
with gr.Column(elem_classes=["my-column"]):
|
||||
with gr.Group(elem_classes=["my-group"]):
|
||||
image = WebRTC(label="Stream", rtc_configuration=rtc_configuration)
|
||||
conf_threshold = gr.Slider(
|
||||
label="Confidence Threshold",
|
||||
minimum=0.0,
|
||||
maximum=1.0,
|
||||
step=0.05,
|
||||
value=0.30,
|
||||
)
|
||||
number = gr.Number()
|
||||
|
||||
image.stream(
|
||||
fn=detection, inputs=[image, conf_threshold], outputs=[image], time_limit=10
|
||||
)
|
||||
image.on_additional_outputs(lambda n: n, outputs=[number])
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
16
demo/phonic_chat/README.md
Normal file
16
demo/phonic_chat/README.md
Normal file
@@ -0,0 +1,16 @@
|
||||
---
|
||||
title: Phonic AI Chat
|
||||
emoji: 🎙️
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Talk to Phonic AI's speech-to-speech model
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|PHONIC_API_KEY]
|
||||
python_version: 3.11
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
134
demo/phonic_chat/app.py
Normal file
134
demo/phonic_chat/app.py
Normal file
@@ -0,0 +1,134 @@
|
||||
import subprocess
|
||||
|
||||
subprocess.run(["pip", "install", "fastrtc==0.0.3.post7"])
|
||||
|
||||
import asyncio
|
||||
import base64
|
||||
import os
|
||||
|
||||
import gradio as gr
|
||||
from gradio.utils import get_space
|
||||
import numpy as np
|
||||
from dotenv import load_dotenv
|
||||
from fastrtc import (
|
||||
AdditionalOutputs,
|
||||
AsyncStreamHandler,
|
||||
Stream,
|
||||
get_twilio_turn_credentials,
|
||||
WebRTCError,
|
||||
audio_to_float32,
|
||||
)
|
||||
from fastapi import FastAPI
|
||||
from phonic.client import PhonicSTSClient, get_voices
|
||||
|
||||
load_dotenv()
|
||||
|
||||
STS_URI = "wss://api.phonic.co/v1/sts/ws"
|
||||
API_KEY = os.environ["PHONIC_API_KEY"]
|
||||
SAMPLE_RATE = 44_100
|
||||
voices = get_voices(API_KEY)
|
||||
voice_ids = [voice["id"] for voice in voices]
|
||||
|
||||
|
||||
class PhonicHandler(AsyncStreamHandler):
|
||||
def __init__(self):
|
||||
super().__init__(input_sample_rate=SAMPLE_RATE, output_sample_rate=SAMPLE_RATE)
|
||||
self.output_queue = asyncio.Queue()
|
||||
self.client = None
|
||||
|
||||
def copy(self) -> AsyncStreamHandler:
|
||||
return PhonicHandler()
|
||||
|
||||
async def start_up(self):
|
||||
await self.wait_for_args()
|
||||
voice_id = self.latest_args[1]
|
||||
try:
|
||||
async with PhonicSTSClient(STS_URI, API_KEY) as client:
|
||||
self.client = client
|
||||
sts_stream = client.sts( # type: ignore
|
||||
input_format="pcm_44100",
|
||||
output_format="pcm_44100",
|
||||
system_prompt="You are a helpful voice assistant. Respond conversationally.",
|
||||
# welcome_message="Hello! I'm your voice assistant. How can I help you today?",
|
||||
voice_id=voice_id,
|
||||
)
|
||||
async for message in sts_stream:
|
||||
message_type = message.get("type")
|
||||
if message_type == "audio_chunk":
|
||||
audio_b64 = message["audio"]
|
||||
audio_bytes = base64.b64decode(audio_b64)
|
||||
await self.output_queue.put(
|
||||
(SAMPLE_RATE, np.frombuffer(audio_bytes, dtype=np.int16))
|
||||
)
|
||||
if text := message.get("text"):
|
||||
msg = {"role": "assistant", "content": text}
|
||||
await self.output_queue.put(AdditionalOutputs(msg))
|
||||
elif message_type == "input_text":
|
||||
msg = {"role": "user", "content": message["text"]}
|
||||
await self.output_queue.put(AdditionalOutputs(msg))
|
||||
except Exception as e:
|
||||
raise WebRTCError(f"Error starting up: {e}")
|
||||
|
||||
async def emit(self):
|
||||
try:
|
||||
return await self.output_queue.get()
|
||||
except Exception as e:
|
||||
raise WebRTCError(f"Error emitting: {e}")
|
||||
|
||||
async def receive(self, frame: tuple[int, np.ndarray]) -> None:
|
||||
try:
|
||||
if not self.client:
|
||||
return
|
||||
audio_float32 = audio_to_float32(frame)
|
||||
await self.client.send_audio(audio_float32) # type: ignore
|
||||
except Exception as e:
|
||||
raise WebRTCError(f"Error sending audio: {e}")
|
||||
|
||||
async def shutdown(self):
|
||||
if self.client:
|
||||
await self.client._websocket.close()
|
||||
return super().shutdown()
|
||||
|
||||
|
||||
def add_to_chatbot(state, chatbot, message):
|
||||
state.append(message)
|
||||
return state, gr.skip()
|
||||
|
||||
|
||||
state = gr.State(value=[])
|
||||
chatbot = gr.Chatbot(type="messages", value=[])
|
||||
stream = Stream(
|
||||
handler=PhonicHandler(),
|
||||
mode="send-receive",
|
||||
modality="audio",
|
||||
additional_inputs=[
|
||||
gr.Dropdown(
|
||||
choices=voice_ids,
|
||||
value="katherine",
|
||||
label="Voice",
|
||||
info="Select a voice from the dropdown",
|
||||
)
|
||||
],
|
||||
additional_outputs=[state, chatbot],
|
||||
additional_outputs_handler=add_to_chatbot,
|
||||
ui_args={
|
||||
"title": "Phonic Chat (Powered by FastRTC ⚡️)",
|
||||
},
|
||||
rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
|
||||
concurrency_limit=5 if get_space() else None,
|
||||
time_limit=90 if get_space() else None,
|
||||
)
|
||||
|
||||
with stream.ui:
|
||||
state.change(lambda s: s, inputs=state, outputs=chatbot)
|
||||
|
||||
app = FastAPI()
|
||||
stream.mount(app)
|
||||
|
||||
if __name__ == "__main__":
|
||||
if (mode := os.getenv("MODE")) == "UI":
|
||||
stream.ui.launch(server_port=7860)
|
||||
elif mode == "PHONE":
|
||||
stream.fastphone(host="0.0.0.0", port=7860)
|
||||
else:
|
||||
stream.ui.launch(server_port=7860)
|
||||
74
demo/phonic_chat/requirements.txt
Normal file
74
demo/phonic_chat/requirements.txt
Normal file
@@ -0,0 +1,74 @@
|
||||
# This file was autogenerated by uv via the following command:
|
||||
# uv pip compile requirements.in -o requirements.txt
|
||||
aiohappyeyeballs==2.4.6
|
||||
# via aiohttp
|
||||
aiohttp==3.11.12
|
||||
# via
|
||||
# aiohttp-retry
|
||||
# twilio
|
||||
aiohttp-retry==2.9.1
|
||||
# via twilio
|
||||
aiosignal==1.3.2
|
||||
# via aiohttp
|
||||
attrs==25.1.0
|
||||
# via aiohttp
|
||||
certifi==2025.1.31
|
||||
# via requests
|
||||
cffi==1.17.1
|
||||
# via sounddevice
|
||||
charset-normalizer==3.4.1
|
||||
# via requests
|
||||
fastrtc==0.0.1
|
||||
# via -r requirements.in
|
||||
frozenlist==1.5.0
|
||||
# via
|
||||
# aiohttp
|
||||
# aiosignal
|
||||
idna==3.10
|
||||
# via
|
||||
# requests
|
||||
# yarl
|
||||
isort==6.0.0
|
||||
# via phonic-python
|
||||
loguru==0.7.3
|
||||
# via phonic-python
|
||||
multidict==6.1.0
|
||||
# via
|
||||
# aiohttp
|
||||
# yarl
|
||||
numpy==2.2.3
|
||||
# via
|
||||
# phonic-python
|
||||
# scipy
|
||||
phonic-python==0.1.3
|
||||
# via -r requirements.in
|
||||
propcache==0.3.0
|
||||
# via
|
||||
# aiohttp
|
||||
# yarl
|
||||
pycparser==2.22
|
||||
# via cffi
|
||||
pyjwt==2.10.1
|
||||
# via twilio
|
||||
python-dotenv==1.0.1
|
||||
# via
|
||||
# -r requirements.in
|
||||
# phonic-python
|
||||
requests==2.32.3
|
||||
# via
|
||||
# phonic-python
|
||||
# twilio
|
||||
scipy==1.15.2
|
||||
# via phonic-python
|
||||
sounddevice==0.5.1
|
||||
# via phonic-python
|
||||
twilio==9.4.6
|
||||
# via -r requirements.in
|
||||
typing-extensions==4.12.2
|
||||
# via phonic-python
|
||||
urllib3==2.3.0
|
||||
# via requests
|
||||
websockets==15.0
|
||||
# via phonic-python
|
||||
yarl==1.18.3
|
||||
# via aiohttp
|
||||
@@ -1,6 +0,0 @@
|
||||
safetensors==0.4.3
|
||||
opencv-python
|
||||
twilio
|
||||
https://huggingface.co/datasets/freddyaboulton/bucket/resolve/main/gradio-5.0.0b3-py3-none-any.whl
|
||||
https://huggingface.co/datasets/freddyaboulton/bucket/resolve/main/gradio_webrtc-0.0.1-py3-none-any.whl
|
||||
onnxruntime-gpu
|
||||
321
demo/space.py
321
demo/space.py
@@ -1,321 +0,0 @@
|
||||
import os
|
||||
|
||||
import gradio as gr
|
||||
|
||||
_docs = {
|
||||
"WebRTC": {
|
||||
"description": "Stream audio/video with WebRTC",
|
||||
"members": {
|
||||
"__init__": {
|
||||
"rtc_configuration": {
|
||||
"type": "dict[str, Any] | None",
|
||||
"default": "None",
|
||||
"description": "The configuration dictionary to pass to the RTCPeerConnection constructor. If None, the default configuration is used.",
|
||||
},
|
||||
"height": {
|
||||
"type": "int | str | None",
|
||||
"default": "None",
|
||||
"description": "The height of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.",
|
||||
},
|
||||
"width": {
|
||||
"type": "int | str | None",
|
||||
"default": "None",
|
||||
"description": "The width of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.",
|
||||
},
|
||||
"label": {
|
||||
"type": "str | None",
|
||||
"default": "None",
|
||||
"description": "the label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.",
|
||||
},
|
||||
"show_label": {
|
||||
"type": "bool | None",
|
||||
"default": "None",
|
||||
"description": "if True, will display label.",
|
||||
},
|
||||
"container": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if True, will place the component in a container - providing some extra padding around the border.",
|
||||
},
|
||||
"scale": {
|
||||
"type": "int | None",
|
||||
"default": "None",
|
||||
"description": "relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.",
|
||||
},
|
||||
"min_width": {
|
||||
"type": "int",
|
||||
"default": "160",
|
||||
"description": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.",
|
||||
},
|
||||
"interactive": {
|
||||
"type": "bool | None",
|
||||
"default": "None",
|
||||
"description": "if True, will allow users to upload a video; if False, can only be used to display videos. If not provided, this is inferred based on whether the component is used as an input or output.",
|
||||
},
|
||||
"visible": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if False, component will be hidden.",
|
||||
},
|
||||
"elem_id": {
|
||||
"type": "str | None",
|
||||
"default": "None",
|
||||
"description": "an optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.",
|
||||
},
|
||||
"elem_classes": {
|
||||
"type": "list[str] | str | None",
|
||||
"default": "None",
|
||||
"description": "an optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.",
|
||||
},
|
||||
"render": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if False, component will not be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.",
|
||||
},
|
||||
"key": {
|
||||
"type": "int | str | None",
|
||||
"default": "None",
|
||||
"description": "if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.",
|
||||
},
|
||||
"mirror_webcam": {
|
||||
"type": "bool",
|
||||
"default": "True",
|
||||
"description": "if True webcam will be mirrored. Default is True.",
|
||||
},
|
||||
},
|
||||
"events": {"tick": {"type": None, "default": None, "description": ""}},
|
||||
},
|
||||
"__meta__": {"additional_interfaces": {}, "user_fn_refs": {"WebRTC": []}},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
abs_path = os.path.join(os.path.dirname(__file__), "css.css")
|
||||
|
||||
with gr.Blocks(
|
||||
css_paths=abs_path,
|
||||
theme=gr.themes.Default(
|
||||
font_mono=[
|
||||
gr.themes.GoogleFont("Inconsolata"),
|
||||
"monospace",
|
||||
],
|
||||
),
|
||||
) as demo:
|
||||
gr.Markdown(
|
||||
"""
|
||||
<h1 style='text-align: center; margin-bottom: 1rem'> Gradio WebRTC ⚡️ </h1>
|
||||
|
||||
<div style="display: flex; flex-direction: row; justify-content: center">
|
||||
<img style="display: block; padding-right: 5px; height: 20px;" alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.5%20-%20orange">
|
||||
<a href="https://github.com/freddyaboulton/gradio-webrtc" target="_blank"><img alt="Static Badge" src="https://img.shields.io/badge/github-white?logo=github&logoColor=black"></a>
|
||||
</div>
|
||||
""",
|
||||
elem_classes=["md-custom"],
|
||||
header_links=True,
|
||||
)
|
||||
gr.Markdown(
|
||||
"""
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
pip install gradio_webrtc
|
||||
```
|
||||
|
||||
## Examples:
|
||||
1. [Object Detection from Webcam with YOLOv10](https://huggingface.co/spaces/freddyaboulton/webrtc-yolov10n) 📷
|
||||
2. [Streaming Object Detection from Video with RT-DETR](https://huggingface.co/spaces/freddyaboulton/rt-detr-object-detection-webrtc) 🎥
|
||||
3. [Text-to-Speech](https://huggingface.co/spaces/freddyaboulton/parler-tts-streaming-webrtc) 🗣️
|
||||
|
||||
## Usage
|
||||
|
||||
The WebRTC component supports the following three use cases:
|
||||
1. Streaming video from the user webcam to the server and back
|
||||
2. Streaming Video from the server to the client
|
||||
3. Streaming Audio from the server to the client
|
||||
|
||||
Streaming Audio from client to the server and back (conversational AI) is not supported yet.
|
||||
|
||||
|
||||
## Streaming Video from the User Webcam to the Server and Back
|
||||
|
||||
```python
|
||||
import gradio as gr
|
||||
from gradio_webrtc import WebRTC
|
||||
|
||||
|
||||
def detection(image, conf_threshold=0.3):
|
||||
... your detection code here ...
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
image = WebRTC(label="Stream", mode="send-receive", modality="video")
|
||||
conf_threshold = gr.Slider(
|
||||
label="Confidence Threshold",
|
||||
minimum=0.0,
|
||||
maximum=1.0,
|
||||
step=0.05,
|
||||
value=0.30,
|
||||
)
|
||||
image.stream(
|
||||
fn=detection,
|
||||
inputs=[image, conf_threshold],
|
||||
outputs=[image], time_limit=10
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
|
||||
```
|
||||
* Set the `mode` parameter to `send-receive` and `modality` to "video".
|
||||
* The `stream` event's `fn` parameter is a function that receives the next frame from the webcam
|
||||
as a **numpy array** and returns the processed frame also as a **numpy array**.
|
||||
* Numpy arrays are in (height, width, 3) format where the color channels are in RGB format.
|
||||
* The `inputs` parameter should be a list where the first element is the WebRTC component. The only output allowed is the WebRTC component.
|
||||
* The `time_limit` parameter is the maximum time in seconds the video stream will run. If the time limit is reached, the video stream will stop.
|
||||
|
||||
## Streaming Video from the Server to the Client
|
||||
|
||||
```python
|
||||
import gradio as gr
|
||||
from gradio_webrtc import WebRTC
|
||||
import cv2
|
||||
|
||||
def generation():
|
||||
url = "https://download.tsi.telecom-paristech.fr/gpac/dataset/dash/uhd/mux_sources/hevcds_720p30_2M.mp4"
|
||||
cap = cv2.VideoCapture(url)
|
||||
iterating = True
|
||||
while iterating:
|
||||
iterating, frame = cap.read()
|
||||
yield frame
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
output_video = WebRTC(label="Video Stream", mode="receive", modality="video")
|
||||
button = gr.Button("Start", variant="primary")
|
||||
output_video.stream(
|
||||
fn=generation, inputs=None, outputs=[output_video],
|
||||
trigger=button.click
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
```
|
||||
|
||||
* Set the "mode" parameter to "receive" and "modality" to "video".
|
||||
* The `stream` event's `fn` parameter is a generator function that yields the next frame from the video as a **numpy array**.
|
||||
* The only output allowed is the WebRTC component.
|
||||
* The `trigger` parameter is the gradio event that will trigger the webrtc connection. In this case, the button click event.
|
||||
|
||||
## Streaming Audio from the Server to the Client
|
||||
|
||||
```python
|
||||
import gradio as gr
|
||||
from pydub import AudioSegment
|
||||
|
||||
def generation(num_steps):
|
||||
for _ in range(num_steps):
|
||||
segment = AudioSegment.from_file("/Users/freddy/sources/gradio/demo/audio_debugger/cantina.wav")
|
||||
yield (segment.frame_rate, np.array(segment.get_array_of_samples()).reshape(1, -1))
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
audio = WebRTC(label="Stream", mode="receive", modality="audio")
|
||||
num_steps = gr.Slider(
|
||||
label="Number of Steps",
|
||||
minimum=1,
|
||||
maximum=10,
|
||||
step=1,
|
||||
value=5,
|
||||
)
|
||||
button = gr.Button("Generate")
|
||||
|
||||
audio.stream(
|
||||
fn=generation, inputs=[num_steps], outputs=[audio],
|
||||
trigger=button.click
|
||||
)
|
||||
```
|
||||
|
||||
* Set the "mode" parameter to "receive" and "modality" to "audio".
|
||||
* The `stream` event's `fn` parameter is a generator function that yields the next audio segment as a tuple of (frame_rate, audio_samples).
|
||||
* The numpy array should be of shape (1, num_samples).
|
||||
* The `outputs` parameter should be a list with the WebRTC component as the only element.
|
||||
|
||||
## Deployment
|
||||
|
||||
When deploying in a cloud environment (like Hugging Face Spaces, EC2, etc), you need to set up a TURN server to relay the WebRTC traffic.
|
||||
The easiest way to do this is to use a service like Twilio.
|
||||
|
||||
```python
|
||||
from twilio.rest import Client
|
||||
import os
|
||||
|
||||
account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
|
||||
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
|
||||
|
||||
client = Client(account_sid, auth_token)
|
||||
|
||||
token = client.tokens.create()
|
||||
|
||||
rtc_configuration = {
|
||||
"iceServers": token.ice_servers,
|
||||
"iceTransportPolicy": "relay",
|
||||
}
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
...
|
||||
rtc = WebRTC(rtc_configuration=rtc_configuration, ...)
|
||||
...
|
||||
```
|
||||
""",
|
||||
elem_classes=["md-custom"],
|
||||
header_links=True,
|
||||
)
|
||||
|
||||
gr.Markdown(
|
||||
"""
|
||||
##
|
||||
""",
|
||||
elem_classes=["md-custom"],
|
||||
header_links=True,
|
||||
)
|
||||
|
||||
gr.ParamViewer(value=_docs["WebRTC"]["members"]["__init__"], linkify=[])
|
||||
|
||||
demo.load(
|
||||
None,
|
||||
js=r"""function() {
|
||||
const refs = {};
|
||||
const user_fn_refs = {
|
||||
WebRTC: [], };
|
||||
requestAnimationFrame(() => {
|
||||
|
||||
Object.entries(user_fn_refs).forEach(([key, refs]) => {
|
||||
if (refs.length > 0) {
|
||||
const el = document.querySelector(`.${key}-user-fn`);
|
||||
if (!el) return;
|
||||
refs.forEach(ref => {
|
||||
el.innerHTML = el.innerHTML.replace(
|
||||
new RegExp("\\b"+ref+"\\b", "g"),
|
||||
`<a href="#h-${ref.toLowerCase()}">${ref}</a>`
|
||||
);
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
Object.entries(refs).forEach(([key, refs]) => {
|
||||
if (refs.length > 0) {
|
||||
const el = document.querySelector(`.${key}`);
|
||||
if (!el) return;
|
||||
refs.forEach(ref => {
|
||||
el.innerHTML = el.innerHTML.replace(
|
||||
new RegExp("\\b"+ref+"\\b", "g"),
|
||||
`<a href="#h-${ref.toLowerCase()}">${ref}</a>`
|
||||
);
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
""",
|
||||
)
|
||||
|
||||
demo.launch()
|
||||
@@ -1,53 +0,0 @@
|
||||
import tempfile
|
||||
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
from gradio_webrtc import AdditionalOutputs, ReplyOnPause, WebRTC
|
||||
from openai import OpenAI
|
||||
from pydub import AudioSegment
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
client = OpenAI()
|
||||
|
||||
|
||||
def transcribe(audio: tuple[int, np.ndarray], transcript: list[dict]):
    """Transcribe a chunk of user speech with Whisper and append it to the transcript.

    Args:
        audio: ``(sample_rate, samples)`` pair from the WebRTC stream; samples
            are assumed to be a single-channel integer array — TODO confirm.
        transcript: running Chatbot message list (mutated in place).

    Yields:
        AdditionalOutputs wrapping the updated transcript so the UI refreshes.
    """
    print("audio", audio)
    # Wrap the raw samples so pydub can export them as mp3 for the Whisper API.
    segment = AudioSegment(
        audio[1].tobytes(),
        frame_rate=audio[0],
        sample_width=audio[1].dtype.itemsize,
        channels=1,
    )

    # Show the user's spoken audio as a playable bubble in the transcript.
    transcript.append(
        {"role": "user", "content": gr.Audio((audio[0], audio[1].squeeze()))}
    )

    with tempfile.NamedTemporaryFile(suffix=".mp3") as temp_audio:
        segment.export(temp_audio.name, format="mp3")
        # Context-manage the handle: the original `open(temp_audio.name, "rb")`
        # was never closed, leaking a file descriptor per transcription.
        with open(temp_audio.name, "rb") as audio_file:
            next_chunk = client.audio.transcriptions.create(
                model="whisper-1", file=audio_file
            ).text
        transcript.append({"role": "assistant", "content": next_chunk})
        yield AdditionalOutputs(transcript)
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
with gr.Row():
|
||||
with gr.Column():
|
||||
audio = WebRTC(
|
||||
label="Stream",
|
||||
mode="send",
|
||||
modality="audio",
|
||||
)
|
||||
with gr.Column():
|
||||
transcript = gr.Chatbot(label="transcript", type="messages")
|
||||
|
||||
audio.stream(ReplyOnPause(transcribe), inputs=[audio, transcript], outputs=[audio],
|
||||
time_limit=30)
|
||||
audio.on_additional_outputs(lambda s: s, outputs=transcript)
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
15
demo/talk_to_claude/README.md
Normal file
15
demo/talk_to_claude/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Talk to Claude
|
||||
emoji: 👨🦰
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Talk to Anthropic's Claude
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|GROQ_API_KEY, secret|ANTHROPIC_API_KEY, secret|ELEVENLABS_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
140
demo/talk_to_claude/app.py
Normal file
140
demo/talk_to_claude/app.py
Normal file
@@ -0,0 +1,140 @@
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
import anthropic
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
from dotenv import load_dotenv
|
||||
from elevenlabs import ElevenLabs
|
||||
from fastapi import FastAPI
|
||||
from fastapi.responses import HTMLResponse, StreamingResponse
|
||||
from fastrtc import (
|
||||
AdditionalOutputs,
|
||||
ReplyOnPause,
|
||||
Stream,
|
||||
WebRTCError,
|
||||
get_tts_model,
|
||||
get_twilio_turn_credentials,
|
||||
)
|
||||
from fastrtc.utils import audio_to_bytes
|
||||
from gradio.utils import get_space
|
||||
from groq import Groq
|
||||
from pydantic import BaseModel
|
||||
|
||||
load_dotenv()
|
||||
|
||||
groq_client = Groq()
|
||||
claude_client = anthropic.Anthropic()
|
||||
tts_client = ElevenLabs(api_key=os.environ["ELEVENLABS_API_KEY"])
|
||||
|
||||
curr_dir = Path(__file__).parent
|
||||
|
||||
tts_model = get_tts_model()
|
||||
|
||||
|
||||
def response(
    audio: tuple[int, np.ndarray],
    chatbot: list[dict] | None = None,
):
    """Speech-to-speech turn handler: transcribe, ask Claude, stream TTS back.

    Args:
        audio: ``(sample_rate, samples)`` microphone chunk from the WebRTC stream.
        chatbot: current Chatbot history as ``{"role", "content"}`` dicts.

    Yields:
        TTS audio chunks, interleaved with AdditionalOutputs carrying the
        updated history so the UI can render new messages as they appear.

    Raises:
        WebRTCError: wraps any failure so the client is notified over the
            data channel.
    """
    try:
        chatbot = chatbot or []
        messages = [{"role": d["role"], "content": d["content"]} for d in chatbot]
        # Speech-to-text via Groq-hosted Whisper.
        prompt = groq_client.audio.transcriptions.create(
            file=("audio-file.mp3", audio_to_bytes(audio)),
            model="whisper-large-v3-turbo",
            response_format="verbose_json",
        ).text

        print("prompt", prompt)
        chatbot.append({"role": "user", "content": prompt})
        # Surface the user's message immediately, before the LLM round-trip.
        yield AdditionalOutputs(chatbot)
        messages.append({"role": "user", "content": prompt})
        # Renamed from `response` to stop shadowing this function's own name.
        claude_response = claude_client.messages.create(
            model="claude-3-5-haiku-20241022",
            max_tokens=512,
            messages=messages,  # type: ignore
        )
        response_text = " ".join(
            block.text  # type: ignore
            for block in claude_response.content
            if getattr(block, "type", None) == "text"
        )
        chatbot.append({"role": "assistant", "content": response_text})

        start = time.time()

        print("starting tts", start)
        for i, chunk in enumerate(tts_model.stream_tts_sync(response_text)):
            print("chunk", i, time.time() - start)
            yield chunk
        print("finished tts", time.time() - start)
        yield AdditionalOutputs(chatbot)
    except Exception as e:
        # Chain the original exception so the real traceback is preserved
        # (the original `raise WebRTCError(str(e))` dropped it).
        raise WebRTCError(str(e)) from e
|
||||
|
||||
|
||||
chatbot = gr.Chatbot(type="messages")
|
||||
stream = Stream(
|
||||
modality="audio",
|
||||
mode="send-receive",
|
||||
handler=ReplyOnPause(response),
|
||||
additional_outputs_handler=lambda a, b: b,
|
||||
additional_inputs=[chatbot],
|
||||
additional_outputs=[chatbot],
|
||||
rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
|
||||
concurrency_limit=5 if get_space() else None,
|
||||
time_limit=90 if get_space() else None,
|
||||
)
|
||||
|
||||
|
||||
class Message(BaseModel):
    """One chat message exchanged with the browser."""

    # Matches the roles the handler appends: "user" or "assistant".
    role: str
    content: str
|
||||
|
||||
|
||||
class InputData(BaseModel):
    """Payload POSTed to /input_hook: ties a chat history to one WebRTC stream."""

    # Client-generated id identifying the active peer connection.
    webrtc_id: str
    chatbot: list[Message]
|
||||
|
||||
|
||||
app = FastAPI()
|
||||
stream.mount(app)
|
||||
|
||||
|
||||
@app.get("/")
async def _():
    """Serve the chat UI with the TURN/STUN configuration injected."""
    # Only fetch Twilio TURN credentials when running on Spaces; locally
    # no ICE servers are needed.
    rtc_config = get_twilio_turn_credentials() if get_space() else None
    html_content = (curr_dir / "index.html").read_text()
    # json.dumps(None) renders as "null", which the JS side handles.
    html_content = html_content.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config))
    return HTMLResponse(content=html_content, status_code=200)
|
||||
|
||||
|
||||
@app.post("/input_hook")
async def _(body: InputData):
    """Receive the browser's chat history and attach it to the WebRTC stream.

    The handler's `chatbot` argument is populated from this per-connection input.
    """
    stream.set_input(body.webrtc_id, body.model_dump()["chatbot"])
    return {"status": "ok"}
|
||||
|
||||
|
||||
@app.get("/outputs")
def _(webrtc_id: str):
    """Stream chat updates for one connection to the browser via server-sent events."""

    async def output_stream():
        # Each AdditionalOutputs yielded by the handler arrives here; forward
        # only the newest message to keep the SSE payload small.
        async for output in stream.output_stream(webrtc_id):
            chatbot = output.args[0]
            yield f"event: output\ndata: {json.dumps(chatbot[-1])}\n\n"

    return StreamingResponse(output_stream(), media_type="text/event-stream")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Note: `os` is already imported at module scope; the redundant local
    # `import os` has been removed.
    if (mode := os.getenv("MODE")) == "UI":
        # Launch the auto-generated Gradio UI.
        stream.ui.launch(server_port=7860, server_name="0.0.0.0")
    elif mode == "PHONE":
        # Expose the stream over a temporary phone number.
        stream.fastphone(host="0.0.0.0", port=7860)
    else:
        # Default: serve the FastAPI app with the custom HTML frontend.
        import uvicorn

        uvicorn.run(app, host="0.0.0.0", port=7860)
|
||||
546
demo/talk_to_claude/index.html
Normal file
546
demo/talk_to_claude/index.html
Normal file
@@ -0,0 +1,546 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>RetroChat Audio</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: monospace;
|
||||
background-color: #1a1a1a;
|
||||
color: #00ff00;
|
||||
margin: 0;
|
||||
padding: 20px;
|
||||
height: 100vh;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
.container {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 20px;
|
||||
height: calc(100% - 100px);
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
|
||||
.chat-container {
|
||||
border: 2px solid #00ff00;
|
||||
padding: 20px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
flex-grow: 1;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
.controls-container {
|
||||
border: 2px solid #00ff00;
|
||||
padding: 20px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 20px;
|
||||
height: 128px;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
.visualization-container {
|
||||
flex-grow: 1;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.box-container {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
height: 64px;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.box {
|
||||
height: 100%;
|
||||
width: 8px;
|
||||
background: #00ff00;
|
||||
border-radius: 8px;
|
||||
transition: transform 0.05s ease;
|
||||
}
|
||||
|
||||
.chat-messages {
|
||||
flex-grow: 1;
|
||||
overflow-y: auto;
|
||||
margin-bottom: 20px;
|
||||
padding: 10px;
|
||||
border: 1px solid #00ff00;
|
||||
}
|
||||
|
||||
.message {
|
||||
margin-bottom: 10px;
|
||||
padding: 8px;
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
.message.user {
|
||||
background-color: #003300;
|
||||
}
|
||||
|
||||
.message.assistant {
|
||||
background-color: #002200;
|
||||
}
|
||||
|
||||
button {
|
||||
height: 64px;
|
||||
min-width: 120px;
|
||||
background-color: #000;
|
||||
color: #00ff00;
|
||||
border: 2px solid #00ff00;
|
||||
padding: 10px 20px;
|
||||
font-family: monospace;
|
||||
font-size: 16px;
|
||||
cursor: pointer;
|
||||
transition: all 0.3s;
|
||||
}
|
||||
|
||||
button:hover {
|
||||
border-width: 3px;
|
||||
}
|
||||
|
||||
#audio-output {
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* Retro CRT effect */
|
||||
.crt-overlay {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
background: repeating-linear-gradient(0deg,
|
||||
rgba(0, 255, 0, 0.03),
|
||||
rgba(0, 255, 0, 0.03) 1px,
|
||||
transparent 1px,
|
||||
transparent 2px);
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
/* Add these new styles */
|
||||
.icon-with-spinner {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 12px;
|
||||
min-width: 180px;
|
||||
}
|
||||
|
||||
.spinner {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
border: 2px solid #00ff00;
|
||||
border-top-color: transparent;
|
||||
border-radius: 50%;
|
||||
animation: spin 1s linear infinite;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
to {
|
||||
transform: rotate(360deg);
|
||||
}
|
||||
}
|
||||
|
||||
.pulse-container {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 12px;
|
||||
min-width: 180px;
|
||||
}
|
||||
|
||||
.pulse-circle {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
border-radius: 50%;
|
||||
background-color: #00ff00;
|
||||
opacity: 0.2;
|
||||
flex-shrink: 0;
|
||||
transform: translateX(-0%) scale(var(--audio-level, 1));
|
||||
transition: transform 0.1s ease;
|
||||
}
|
||||
|
||||
/* Add styles for typing indicator */
|
||||
.typing-indicator {
|
||||
padding: 8px;
|
||||
background-color: #002200;
|
||||
border-radius: 4px;
|
||||
margin-bottom: 10px;
|
||||
display: none;
|
||||
}
|
||||
|
||||
.dots {
|
||||
display: inline-flex;
|
||||
gap: 4px;
|
||||
}
|
||||
|
||||
.dot {
|
||||
width: 8px;
|
||||
height: 8px;
|
||||
background-color: #00ff00;
|
||||
border-radius: 50%;
|
||||
animation: pulse 1.5s infinite;
|
||||
opacity: 0.5;
|
||||
}
|
||||
|
||||
.dot:nth-child(2) {
|
||||
animation-delay: 0.5s;
|
||||
}
|
||||
|
||||
.dot:nth-child(3) {
|
||||
animation-delay: 1s;
|
||||
}
|
||||
|
||||
@keyframes pulse {
|
||||
|
||||
0%,
|
||||
100% {
|
||||
opacity: 0.5;
|
||||
transform: scale(1);
|
||||
}
|
||||
|
||||
50% {
|
||||
opacity: 1;
|
||||
transform: scale(1.2);
|
||||
}
|
||||
}
|
||||
|
||||
/* Add styles for toast notifications */
|
||||
.toast {
|
||||
position: fixed;
|
||||
top: 20px;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
padding: 16px 24px;
|
||||
border-radius: 4px;
|
||||
font-size: 14px;
|
||||
z-index: 1000;
|
||||
display: none;
|
||||
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
|
||||
}
|
||||
|
||||
.toast.error {
|
||||
background-color: #f44336;
|
||||
color: white;
|
||||
}
|
||||
|
||||
.toast.warning {
|
||||
background-color: #ffd700;
|
||||
color: black;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<!-- Add toast element after body opening tag -->
|
||||
<div id="error-toast" class="toast"></div>
|
||||
<div class="container">
|
||||
<div class="chat-container">
|
||||
<div class="chat-messages" id="chat-messages"></div>
|
||||
<!-- Move typing indicator outside the chat messages -->
|
||||
<div class="typing-indicator" id="typing-indicator">
|
||||
<div class="dots">
|
||||
<div class="dot"></div>
|
||||
<div class="dot"></div>
|
||||
<div class="dot"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="controls-container">
|
||||
<div class="visualization-container">
|
||||
<div class="box-container">
|
||||
<!-- Boxes will be dynamically added here -->
|
||||
</div>
|
||||
</div>
|
||||
<button id="start-button">Start</button>
|
||||
</div>
|
||||
</div>
|
||||
<audio id="audio-output"></audio>
|
||||
|
||||
<script>
|
||||
let audioContext;
|
||||
let analyser_input, analyser_output;
|
||||
let dataArray_input, dataArray_output;
|
||||
let animationId_input, animationId_output;
|
||||
let chatHistory = [];
|
||||
let peerConnection;
|
||||
let webrtc_id;
|
||||
|
||||
const audioOutput = document.getElementById('audio-output');
|
||||
const startButton = document.getElementById('start-button');
|
||||
const chatMessages = document.getElementById('chat-messages');
|
||||
|
||||
function updateButtonState() {
    // Reflect the RTCPeerConnection state on the start/stop button.
    const state = peerConnection ? peerConnection.connectionState : null;
    if (state === 'connecting' || state === 'new') {
        startButton.innerHTML = `
                <div class="icon-with-spinner">
                    <div class="spinner"></div>
                    <span>Connecting...</span>
                </div>
            `;
    } else if (state === 'connected') {
        startButton.innerHTML = `
                <div class="pulse-container">
                    <div class="pulse-circle"></div>
                    <span>Stop</span>
                </div>
            `;
    } else {
        startButton.innerHTML = 'Start';
    }
}
|
||||
|
||||
function showError(message) {
    // Pop a red toast with the given message and auto-dismiss after 5 seconds.
    const toast = document.getElementById('error-toast');
    toast.textContent = message;
    toast.className = 'toast error';
    toast.style.display = 'block';
    setTimeout(() => {
        toast.style.display = 'none';
    }, 5000);
}
|
||||
|
||||
// Full connection bootstrap: mic capture, input/output visualization,
// SDP offer/answer exchange with /webrtc/offer, data-channel events,
// and the SSE subscription for chat updates.
async function setupWebRTC() {
    const config = __RTC_CONFIGURATION__;
    peerConnection = new RTCPeerConnection(config);

    // Warn (not fail) if connecting takes >5s — often a VPN blocking UDP.
    const timeoutId = setTimeout(() => {
        const toast = document.getElementById('error-toast');
        toast.textContent = "Connection is taking longer than usual. Are you on a VPN?";
        toast.className = 'toast warning';
        toast.style.display = 'block';

        // Hide warning after 5 seconds
        setTimeout(() => {
            toast.style.display = 'none';
        }, 5000);
    }, 5000);

    try {
        const stream = await navigator.mediaDevices.getUserMedia({
            audio: true
        });

        // Set up input visualization (drives the pulsing circle on the button).
        audioContext = new AudioContext();
        analyser_input = audioContext.createAnalyser();
        const inputSource = audioContext.createMediaStreamSource(stream);
        inputSource.connect(analyser_input);
        analyser_input.fftSize = 64;
        dataArray_input = new Uint8Array(analyser_input.frequencyBinCount);

        function updateAudioLevel() {
            analyser_input.getByteFrequencyData(dataArray_input);
            // Mean level across bins, normalized to 0..1.
            const average = Array.from(dataArray_input).reduce((a, b) => a + b, 0) / dataArray_input.length;
            const audioLevel = average / 255;

            const pulseCircle = document.querySelector('.pulse-circle');
            if (pulseCircle) {
                pulseCircle.style.setProperty('--audio-level', 1 + audioLevel);
            }

            animationId_input = requestAnimationFrame(updateAudioLevel);
        }
        updateAudioLevel();

        stream.getTracks().forEach(track => {
            peerConnection.addTrack(track, stream);
        });

        // Add connection state change listener
        peerConnection.addEventListener('connectionstatechange', () => {
            console.log('Connection state:', peerConnection.connectionState);
            if (peerConnection.connectionState === 'connected') {
                // Connected: cancel the VPN warning and clear any toast.
                clearTimeout(timeoutId);
                const toast = document.getElementById('error-toast');
                toast.style.display = 'none';
            }
            updateButtonState();
        });

        // Handle incoming audio
        peerConnection.addEventListener('track', (evt) => {
            if (audioOutput.srcObject !== evt.streams[0]) {
                audioOutput.srcObject = evt.streams[0];
                audioOutput.play();

                // Set up output visualization (the bar graph).
                analyser_output = audioContext.createAnalyser();
                const outputSource = audioContext.createMediaStreamSource(evt.streams[0]);
                outputSource.connect(analyser_output);
                analyser_output.fftSize = 2048;
                dataArray_output = new Uint8Array(analyser_output.frequencyBinCount);
                updateVisualization();
            }
        });

        // Create data channel for server -> client control messages.
        const dataChannel = peerConnection.createDataChannel('text');
        dataChannel.onmessage = (event) => {
            const eventJson = JSON.parse(event.data);
            const typingIndicator = document.getElementById('typing-indicator');

            if (eventJson.type === "error") {
                showError(eventJson.message);
            } else if (eventJson.type === "send_input") {
                // Server asks for the current chat history for this connection.
                fetch('/input_hook', {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json',
                    },
                    body: JSON.stringify({
                        webrtc_id: webrtc_id,
                        chatbot: chatHistory
                    })
                });
            } else if (eventJson.type === "log") {
                // Toggle the typing indicator around the model's response.
                if (eventJson.data === "pause_detected") {
                    typingIndicator.style.display = 'block';
                    chatMessages.scrollTop = chatMessages.scrollHeight;
                } else if (eventJson.data === "response_starting") {
                    typingIndicator.style.display = 'none';
                }
            }
        };

        // Create and send offer
        const offer = await peerConnection.createOffer();
        await peerConnection.setLocalDescription(offer);

        // Wait for ICE gathering to finish so the offer has all candidates.
        await new Promise((resolve) => {
            if (peerConnection.iceGatheringState === "complete") {
                resolve();
            } else {
                const checkState = () => {
                    if (peerConnection.iceGatheringState === "complete") {
                        peerConnection.removeEventListener("icegatheringstatechange", checkState);
                        resolve();
                    }
                };
                peerConnection.addEventListener("icegatheringstatechange", checkState);
            }
        });

        webrtc_id = Math.random().toString(36).substring(7);

        const response = await fetch('/webrtc/offer', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({
                sdp: peerConnection.localDescription.sdp,
                type: peerConnection.localDescription.type,
                webrtc_id: webrtc_id
            })
        });

        const serverResponse = await response.json();

        if (serverResponse.status === 'failed') {
            showError(serverResponse.meta.error === 'concurrency_limit_reached'
                ? `Too many connections. Maximum limit is ${serverResponse.meta.limit}`
                : serverResponse.meta.error);
            stop();
            return;
        }

        await peerConnection.setRemoteDescription(serverResponse);

        // Start visualization
        updateVisualization();

        // create event stream to receive messages from /output
        const eventSource = new EventSource('/outputs?webrtc_id=' + webrtc_id);
        eventSource.addEventListener("output", (event) => {
            const eventJson = JSON.parse(event.data);
            addMessage(eventJson.role, eventJson.content);
        });
    } catch (err) {
        clearTimeout(timeoutId);
        console.error('Error setting up WebRTC:', err);
        showError('Failed to establish connection. Please try again.');
        stop();
    }
}
|
||||
|
||||
function addMessage(role, content) {
|
||||
const messageDiv = document.createElement('div');
|
||||
messageDiv.classList.add('message', role);
|
||||
messageDiv.textContent = content;
|
||||
chatMessages.appendChild(messageDiv);
|
||||
chatMessages.scrollTop = chatMessages.scrollHeight;
|
||||
chatHistory.push({ role, content });
|
||||
}
|
||||
|
||||
// Add this after other const declarations
|
||||
const boxContainer = document.querySelector('.box-container');
|
||||
const numBars = 32;
|
||||
for (let i = 0; i < numBars; i++) {
|
||||
const box = document.createElement('div');
|
||||
box.className = 'box';
|
||||
boxContainer.appendChild(box);
|
||||
}
|
||||
|
||||
// Replace the draw function with updateVisualization
|
||||
function updateVisualization() {
|
||||
animationId_output = requestAnimationFrame(updateVisualization);
|
||||
|
||||
analyser_output.getByteFrequencyData(dataArray_output);
|
||||
const bars = document.querySelectorAll('.box');
|
||||
|
||||
for (let i = 0; i < bars.length; i++) {
|
||||
const barHeight = (dataArray_output[i] / 255) * 2;
|
||||
bars[i].style.transform = `scaleY(${Math.max(0.1, barHeight)})`;
|
||||
}
|
||||
}
|
||||
|
||||
function stop() {
|
||||
if (peerConnection) {
|
||||
if (peerConnection.getTransceivers) {
|
||||
peerConnection.getTransceivers().forEach(transceiver => {
|
||||
if (transceiver.stop) {
|
||||
transceiver.stop();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (peerConnection.getSenders) {
|
||||
peerConnection.getSenders().forEach(sender => {
|
||||
if (sender.track && sender.track.stop) sender.track.stop();
|
||||
});
|
||||
}
|
||||
|
||||
peerConnection.close();
|
||||
}
|
||||
|
||||
if (animationId_input) {
|
||||
cancelAnimationFrame(animationId_input);
|
||||
}
|
||||
if (animationId_output) {
|
||||
cancelAnimationFrame(animationId_output);
|
||||
}
|
||||
if (audioContext) {
|
||||
audioContext.close();
|
||||
}
|
||||
|
||||
updateButtonState();
|
||||
}
|
||||
|
||||
startButton.addEventListener('click', () => {
|
||||
if (startButton.textContent === 'Start') {
|
||||
setupWebRTC();
|
||||
} else {
|
||||
stop();
|
||||
}
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
6
demo/talk_to_claude/requirements.txt
Normal file
6
demo/talk_to_claude/requirements.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
fastrtc[vad, tts]
|
||||
elevenlabs
|
||||
groq
|
||||
anthropic
|
||||
twilio
|
||||
python-dotenv
|
||||
15
demo/talk_to_gemini/README.md
Normal file
15
demo/talk_to_gemini/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Talk to Gemini
|
||||
emoji: ♊️
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Talk to Gemini using Google's multimodal API
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|GEMINI_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
15
demo/talk_to_gemini/README_gradio.md
Normal file
15
demo/talk_to_gemini/README_gradio.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Talk to Gemini (Gradio UI)
|
||||
emoji: ♊️
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Talk to Gemini (Gradio UI)
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|GEMINI_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
187
demo/talk_to_gemini/app.py
Normal file
187
demo/talk_to_gemini/app.py
Normal file
@@ -0,0 +1,187 @@
|
||||
import asyncio
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
import pathlib
|
||||
from typing import AsyncGenerator, Literal
|
||||
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
from dotenv import load_dotenv
|
||||
from fastapi import FastAPI
|
||||
from fastapi.responses import HTMLResponse
|
||||
from fastrtc import (
|
||||
AsyncStreamHandler,
|
||||
Stream,
|
||||
WebRTCError,
|
||||
get_twilio_turn_credentials,
|
||||
)
|
||||
from google import genai
|
||||
from google.genai.types import (
|
||||
LiveConnectConfig,
|
||||
PrebuiltVoiceConfig,
|
||||
SpeechConfig,
|
||||
VoiceConfig,
|
||||
)
|
||||
from gradio.utils import get_space
|
||||
from pydantic import BaseModel
|
||||
|
||||
current_dir = pathlib.Path(__file__).parent
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
def encode_audio(data: np.ndarray) -> str:
|
||||
"""Encode Audio data to send to the server"""
|
||||
return base64.b64encode(data.tobytes()).decode("UTF-8")
|
||||
|
||||
|
||||
class GeminiHandler(AsyncStreamHandler):
|
||||
"""Handler for the Gemini API"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
expected_layout: Literal["mono"] = "mono",
|
||||
output_sample_rate: int = 24000,
|
||||
output_frame_size: int = 480,
|
||||
) -> None:
|
||||
super().__init__(
|
||||
expected_layout,
|
||||
output_sample_rate,
|
||||
output_frame_size,
|
||||
input_sample_rate=16000,
|
||||
)
|
||||
self.input_queue: asyncio.Queue = asyncio.Queue()
|
||||
self.output_queue: asyncio.Queue = asyncio.Queue()
|
||||
self.quit: asyncio.Event = asyncio.Event()
|
||||
|
||||
def copy(self) -> "GeminiHandler":
|
||||
return GeminiHandler(
|
||||
expected_layout="mono",
|
||||
output_sample_rate=self.output_sample_rate,
|
||||
output_frame_size=self.output_frame_size,
|
||||
)
|
||||
|
||||
async def start_up(self):
|
||||
if not self.phone_mode:
|
||||
await self.wait_for_args()
|
||||
api_key, voice_name = self.latest_args[1:]
|
||||
else:
|
||||
api_key, voice_name = None, "Puck"
|
||||
try:
|
||||
client = genai.Client(
|
||||
api_key=api_key or os.getenv("GEMINI_API_KEY"),
|
||||
http_options={"api_version": "v1alpha"},
|
||||
)
|
||||
except Exception as e:
|
||||
raise WebRTCError(str(e))
|
||||
config = LiveConnectConfig(
|
||||
response_modalities=["AUDIO"], # type: ignore
|
||||
speech_config=SpeechConfig(
|
||||
voice_config=VoiceConfig(
|
||||
prebuilt_voice_config=PrebuiltVoiceConfig(
|
||||
voice_name=voice_name,
|
||||
)
|
||||
)
|
||||
),
|
||||
)
|
||||
try:
|
||||
async with client.aio.live.connect(
|
||||
model="gemini-2.0-flash-exp", config=config
|
||||
) as session:
|
||||
async for audio in session.start_stream(
|
||||
stream=self.stream(), mime_type="audio/pcm"
|
||||
):
|
||||
if audio.data:
|
||||
array = np.frombuffer(audio.data, dtype=np.int16)
|
||||
self.output_queue.put_nowait(array)
|
||||
except Exception as e:
|
||||
raise WebRTCError(str(e))
|
||||
|
||||
async def stream(self) -> AsyncGenerator[bytes, None]:
|
||||
while not self.quit.is_set():
|
||||
try:
|
||||
audio = await asyncio.wait_for(self.input_queue.get(), 0.1)
|
||||
yield audio
|
||||
except (asyncio.TimeoutError, TimeoutError):
|
||||
pass
|
||||
|
||||
async def receive(self, frame: tuple[int, np.ndarray]) -> None:
|
||||
_, array = frame
|
||||
array = array.squeeze()
|
||||
audio_message = encode_audio(array)
|
||||
self.input_queue.put_nowait(audio_message)
|
||||
|
||||
async def emit(self) -> tuple[int, np.ndarray]:
|
||||
array = await self.output_queue.get()
|
||||
return (self.output_sample_rate, array)
|
||||
|
||||
def shutdown(self) -> None:
|
||||
self.quit.set()
|
||||
self.args_set.clear()
|
||||
|
||||
|
||||
stream = Stream(
|
||||
modality="audio",
|
||||
mode="send-receive",
|
||||
handler=GeminiHandler(),
|
||||
rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
|
||||
concurrency_limit=5 if get_space() else None,
|
||||
time_limit=90 if get_space() else None,
|
||||
additional_inputs=[
|
||||
gr.Textbox(
|
||||
label="API Key",
|
||||
type="password",
|
||||
value=os.getenv("GEMINI_API_KEY") if not get_space() else "",
|
||||
),
|
||||
gr.Dropdown(
|
||||
label="Voice",
|
||||
choices=[
|
||||
"Puck",
|
||||
"Charon",
|
||||
"Kore",
|
||||
"Fenrir",
|
||||
"Aoede",
|
||||
],
|
||||
value="Puck",
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
class InputData(BaseModel):
|
||||
webrtc_id: str
|
||||
voice_name: str
|
||||
api_key: str
|
||||
|
||||
|
||||
app = FastAPI()
|
||||
|
||||
stream.mount(app)
|
||||
|
||||
|
||||
@app.post("/input_hook")
|
||||
async def _(body: InputData):
|
||||
stream.set_input(body.webrtc_id, body.api_key, body.voice_name)
|
||||
return {"status": "ok"}
|
||||
|
||||
|
||||
@app.get("/")
|
||||
async def index():
|
||||
rtc_config = get_twilio_turn_credentials() if get_space() else None
|
||||
html_content = (current_dir / "index.html").read_text()
|
||||
html_content = html_content.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config))
|
||||
return HTMLResponse(content=html_content)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import os
|
||||
|
||||
if (mode := os.getenv("MODE")) == "UI":
|
||||
stream.ui.launch(server_port=7860)
|
||||
elif mode == "PHONE":
|
||||
stream.fastphone(host="0.0.0.0", port=7860)
|
||||
else:
|
||||
import uvicorn
|
||||
|
||||
uvicorn.run(app, host="0.0.0.0", port=7860)
|
||||
452
demo/talk_to_gemini/index.html
Normal file
452
demo/talk_to_gemini/index.html
Normal file
@@ -0,0 +1,452 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Gemini Voice Chat</title>
|
||||
<style>
|
||||
:root {
|
||||
--color-accent: #6366f1;
|
||||
--color-background: #0f172a;
|
||||
--color-surface: #1e293b;
|
||||
--color-text: #e2e8f0;
|
||||
--boxSize: 8px;
|
||||
--gutter: 4px;
|
||||
}
|
||||
|
||||
body {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
background-color: var(--color-background);
|
||||
color: var(--color-text);
|
||||
font-family: system-ui, -apple-system, sans-serif;
|
||||
min-height: 100vh;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
}
|
||||
|
||||
.container {
|
||||
width: 90%;
|
||||
max-width: 800px;
|
||||
background-color: var(--color-surface);
|
||||
padding: 2rem;
|
||||
border-radius: 1rem;
|
||||
box-shadow: 0 25px 50px -12px rgba(0, 0, 0, 0.25);
|
||||
}
|
||||
|
||||
.wave-container {
|
||||
position: relative;
|
||||
display: flex;
|
||||
min-height: 100px;
|
||||
max-height: 128px;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
margin: 2rem 0;
|
||||
}
|
||||
|
||||
.box-container {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
height: 64px;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.box {
|
||||
height: 100%;
|
||||
width: var(--boxSize);
|
||||
background: var(--color-accent);
|
||||
border-radius: 8px;
|
||||
transition: transform 0.05s ease;
|
||||
}
|
||||
|
||||
.controls {
|
||||
display: grid;
|
||||
gap: 1rem;
|
||||
margin-bottom: 2rem;
|
||||
}
|
||||
|
||||
.input-group {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 0.5rem;
|
||||
}
|
||||
|
||||
label {
|
||||
font-size: 0.875rem;
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
input,
|
||||
select {
|
||||
padding: 0.75rem;
|
||||
border-radius: 0.5rem;
|
||||
border: 1px solid rgba(255, 255, 255, 0.1);
|
||||
background-color: var(--color-background);
|
||||
color: var(--color-text);
|
||||
font-size: 1rem;
|
||||
}
|
||||
|
||||
button {
|
||||
padding: 1rem 2rem;
|
||||
border-radius: 0.5rem;
|
||||
border: none;
|
||||
background-color: var(--color-accent);
|
||||
color: white;
|
||||
font-weight: 600;
|
||||
cursor: pointer;
|
||||
transition: all 0.2s ease;
|
||||
}
|
||||
|
||||
button:hover {
|
||||
opacity: 0.9;
|
||||
transform: translateY(-1px);
|
||||
}
|
||||
|
||||
.icon-with-spinner {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 12px;
|
||||
min-width: 180px;
|
||||
}
|
||||
|
||||
.spinner {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
border: 2px solid white;
|
||||
border-top-color: transparent;
|
||||
border-radius: 50%;
|
||||
animation: spin 1s linear infinite;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
to {
|
||||
transform: rotate(360deg);
|
||||
}
|
||||
}
|
||||
|
||||
.pulse-container {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 12px;
|
||||
min-width: 180px;
|
||||
}
|
||||
|
||||
.pulse-circle {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
border-radius: 50%;
|
||||
background-color: white;
|
||||
opacity: 0.2;
|
||||
flex-shrink: 0;
|
||||
transform: translateX(-0%) scale(var(--audio-level, 1));
|
||||
transition: transform 0.1s ease;
|
||||
}
|
||||
|
||||
/* Add styles for toast notifications */
|
||||
.toast {
|
||||
position: fixed;
|
||||
top: 20px;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
padding: 16px 24px;
|
||||
border-radius: 4px;
|
||||
font-size: 14px;
|
||||
z-index: 1000;
|
||||
display: none;
|
||||
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
|
||||
}
|
||||
|
||||
.toast.error {
|
||||
background-color: #f44336;
|
||||
color: white;
|
||||
}
|
||||
|
||||
.toast.warning {
|
||||
background-color: #ffd700;
|
||||
color: black;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
|
||||
<body>
|
||||
<!-- Add toast element after body opening tag -->
|
||||
<div id="error-toast" class="toast"></div>
|
||||
<div style="text-align: center">
|
||||
<h1>Gemini Voice Chat</h1>
|
||||
<p>Speak with Gemini using real-time audio streaming</p>
|
||||
<p>
|
||||
Get a Gemini API key
|
||||
<a href="https://ai.google.dev/gemini-api/docs/api-key">here</a>
|
||||
</p>
|
||||
</div>
|
||||
<div class="container">
|
||||
<div class="controls">
|
||||
<div class="input-group">
|
||||
<label for="api-key">API Key</label>
|
||||
<input type="password" id="api-key" placeholder="Enter your API key">
|
||||
</div>
|
||||
<div class="input-group">
|
||||
<label for="voice">Voice</label>
|
||||
<select id="voice">
|
||||
<option value="Puck">Puck</option>
|
||||
<option value="Charon">Charon</option>
|
||||
<option value="Kore">Kore</option>
|
||||
<option value="Fenrir">Fenrir</option>
|
||||
<option value="Aoede">Aoede</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="wave-container">
|
||||
<div class="box-container">
|
||||
<!-- Boxes will be dynamically added here -->
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<button id="start-button">Start Recording</button>
|
||||
</div>
|
||||
|
||||
<audio id="audio-output"></audio>
|
||||
|
||||
<script>
|
||||
let peerConnection;
|
||||
let audioContext;
|
||||
let dataChannel;
|
||||
let isRecording = false;
|
||||
let webrtc_id;
|
||||
|
||||
const startButton = document.getElementById('start-button');
|
||||
const apiKeyInput = document.getElementById('api-key');
|
||||
const voiceSelect = document.getElementById('voice');
|
||||
const audioOutput = document.getElementById('audio-output');
|
||||
const boxContainer = document.querySelector('.box-container');
|
||||
|
||||
const numBars = 32;
|
||||
for (let i = 0; i < numBars; i++) {
|
||||
const box = document.createElement('div');
|
||||
box.className = 'box';
|
||||
boxContainer.appendChild(box);
|
||||
}
|
||||
|
||||
function updateButtonState() {
|
||||
if (peerConnection && (peerConnection.connectionState === 'connecting' || peerConnection.connectionState === 'new')) {
|
||||
startButton.innerHTML = `
|
||||
<div class="icon-with-spinner">
|
||||
<div class="spinner"></div>
|
||||
<span>Connecting...</span>
|
||||
</div>
|
||||
`;
|
||||
} else if (peerConnection && peerConnection.connectionState === 'connected') {
|
||||
startButton.innerHTML = `
|
||||
<div class="pulse-container">
|
||||
<div class="pulse-circle"></div>
|
||||
<span>Stop Recording</span>
|
||||
</div>
|
||||
`;
|
||||
} else {
|
||||
startButton.innerHTML = 'Start Recording';
|
||||
}
|
||||
}
|
||||
|
||||
function showError(message) {
|
||||
const toast = document.getElementById('error-toast');
|
||||
toast.textContent = message;
|
||||
toast.className = 'toast error';
|
||||
toast.style.display = 'block';
|
||||
|
||||
// Hide toast after 5 seconds
|
||||
setTimeout(() => {
|
||||
toast.style.display = 'none';
|
||||
}, 5000);
|
||||
}
|
||||
|
||||
async function setupWebRTC() {
|
||||
const config = __RTC_CONFIGURATION__;
|
||||
peerConnection = new RTCPeerConnection(config);
|
||||
webrtc_id = Math.random().toString(36).substring(7);
|
||||
|
||||
const timeoutId = setTimeout(() => {
|
||||
const toast = document.getElementById('error-toast');
|
||||
toast.textContent = "Connection is taking longer than usual. Are you on a VPN?";
|
||||
toast.className = 'toast warning';
|
||||
toast.style.display = 'block';
|
||||
|
||||
// Hide warning after 5 seconds
|
||||
setTimeout(() => {
|
||||
toast.style.display = 'none';
|
||||
}, 5000);
|
||||
}, 5000);
|
||||
|
||||
try {
|
||||
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
|
||||
stream.getTracks().forEach(track => peerConnection.addTrack(track, stream));
|
||||
|
||||
// Update audio visualization setup
|
||||
audioContext = new AudioContext();
|
||||
analyser_input = audioContext.createAnalyser();
|
||||
const source = audioContext.createMediaStreamSource(stream);
|
||||
source.connect(analyser_input);
|
||||
analyser_input.fftSize = 64;
|
||||
dataArray_input = new Uint8Array(analyser_input.frequencyBinCount);
|
||||
|
||||
function updateAudioLevel() {
|
||||
analyser_input.getByteFrequencyData(dataArray_input);
|
||||
const average = Array.from(dataArray_input).reduce((a, b) => a + b, 0) / dataArray_input.length;
|
||||
const audioLevel = average / 255;
|
||||
|
||||
const pulseCircle = document.querySelector('.pulse-circle');
|
||||
if (pulseCircle) {
|
||||
console.log("audioLevel", audioLevel);
|
||||
pulseCircle.style.setProperty('--audio-level', 1 + audioLevel);
|
||||
}
|
||||
|
||||
animationId = requestAnimationFrame(updateAudioLevel);
|
||||
}
|
||||
updateAudioLevel();
|
||||
|
||||
// Add connection state change listener
|
||||
peerConnection.addEventListener('connectionstatechange', () => {
|
||||
console.log('connectionstatechange', peerConnection.connectionState);
|
||||
if (peerConnection.connectionState === 'connected') {
|
||||
clearTimeout(timeoutId);
|
||||
const toast = document.getElementById('error-toast');
|
||||
toast.style.display = 'none';
|
||||
}
|
||||
updateButtonState();
|
||||
});
|
||||
|
||||
// Handle incoming audio
|
||||
peerConnection.addEventListener('track', (evt) => {
|
||||
if (audioOutput && audioOutput.srcObject !== evt.streams[0]) {
|
||||
audioOutput.srcObject = evt.streams[0];
|
||||
audioOutput.play();
|
||||
|
||||
// Set up audio visualization on the output stream
|
||||
audioContext = new AudioContext();
|
||||
analyser = audioContext.createAnalyser();
|
||||
const source = audioContext.createMediaStreamSource(evt.streams[0]);
|
||||
source.connect(analyser);
|
||||
analyser.fftSize = 2048;
|
||||
dataArray = new Uint8Array(analyser.frequencyBinCount);
|
||||
updateVisualization();
|
||||
}
|
||||
});
|
||||
|
||||
// Create data channel for messages
|
||||
dataChannel = peerConnection.createDataChannel('text');
|
||||
dataChannel.onmessage = (event) => {
|
||||
const eventJson = JSON.parse(event.data);
|
||||
if (eventJson.type === "error") {
|
||||
showError(eventJson.message);
|
||||
} else if (eventJson.type === "send_input") {
|
||||
fetch('/input_hook', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
webrtc_id: webrtc_id,
|
||||
api_key: apiKeyInput.value,
|
||||
voice_name: voiceSelect.value
|
||||
})
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
// Create and send offer
|
||||
const offer = await peerConnection.createOffer();
|
||||
await peerConnection.setLocalDescription(offer);
|
||||
|
||||
await new Promise((resolve) => {
|
||||
if (peerConnection.iceGatheringState === "complete") {
|
||||
resolve();
|
||||
} else {
|
||||
const checkState = () => {
|
||||
if (peerConnection.iceGatheringState === "complete") {
|
||||
peerConnection.removeEventListener("icegatheringstatechange", checkState);
|
||||
resolve();
|
||||
}
|
||||
};
|
||||
peerConnection.addEventListener("icegatheringstatechange", checkState);
|
||||
}
|
||||
});
|
||||
|
||||
const response = await fetch('/webrtc/offer', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
sdp: peerConnection.localDescription.sdp,
|
||||
type: peerConnection.localDescription.type,
|
||||
webrtc_id: webrtc_id,
|
||||
})
|
||||
});
|
||||
|
||||
const serverResponse = await response.json();
|
||||
|
||||
if (serverResponse.status === 'failed') {
|
||||
showError(serverResponse.meta.error === 'concurrency_limit_reached'
|
||||
? `Too many connections. Maximum limit is ${serverResponse.meta.limit}`
|
||||
: serverResponse.meta.error);
|
||||
stop();
|
||||
startButton.textContent = 'Start Recording';
|
||||
return;
|
||||
}
|
||||
|
||||
await peerConnection.setRemoteDescription(serverResponse);
|
||||
} catch (err) {
|
||||
clearTimeout(timeoutId);
|
||||
console.error('Error setting up WebRTC:', err);
|
||||
showError('Failed to establish connection. Please try again.');
|
||||
stop();
|
||||
startButton.textContent = 'Start Recording';
|
||||
}
|
||||
}
|
||||
|
||||
function updateVisualization() {
|
||||
if (!analyser) return;
|
||||
|
||||
analyser.getByteFrequencyData(dataArray);
|
||||
const bars = document.querySelectorAll('.box');
|
||||
|
||||
for (let i = 0; i < bars.length; i++) {
|
||||
const barHeight = (dataArray[i] / 255) * 2;
|
||||
bars[i].style.transform = `scaleY(${Math.max(0.1, barHeight)})`;
|
||||
}
|
||||
|
||||
animationId = requestAnimationFrame(updateVisualization);
|
||||
}
|
||||
|
||||
function stopWebRTC() {
|
||||
if (peerConnection) {
|
||||
peerConnection.close();
|
||||
}
|
||||
if (animationId) {
|
||||
cancelAnimationFrame(animationId);
|
||||
}
|
||||
if (audioContext) {
|
||||
audioContext.close();
|
||||
}
|
||||
updateButtonState();
|
||||
}
|
||||
|
||||
startButton.addEventListener('click', () => {
|
||||
if (!isRecording) {
|
||||
setupWebRTC();
|
||||
startButton.classList.add('recording');
|
||||
} else {
|
||||
stopWebRTC();
|
||||
startButton.classList.remove('recording');
|
||||
}
|
||||
isRecording = !isRecording;
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
4
demo/talk_to_gemini/requirements.txt
Normal file
4
demo/talk_to_gemini/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
fastrtc
|
||||
python-dotenv
|
||||
google-genai
|
||||
twilio
|
||||
15
demo/talk_to_openai/README.md
Normal file
15
demo/talk_to_openai/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Talk to OpenAI
|
||||
emoji: 🗣️
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Talk to OpenAI using their multimodal API
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|OPENAI_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
15
demo/talk_to_openai/README_gradio.md
Normal file
15
demo/talk_to_openai/README_gradio.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Talk to OpenAI (Gradio UI)
|
||||
emoji: 🗣️
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Talk to OpenAI (Gradio UI)
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|OPENAI_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
160
demo/talk_to_openai/app.py
Normal file
160
demo/talk_to_openai/app.py
Normal file
@@ -0,0 +1,160 @@
|
||||
import asyncio
|
||||
import base64
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
import openai
|
||||
from dotenv import load_dotenv
|
||||
from fastapi import FastAPI
|
||||
from fastapi.responses import HTMLResponse, StreamingResponse
|
||||
from fastrtc import (
|
||||
AdditionalOutputs,
|
||||
AsyncStreamHandler,
|
||||
Stream,
|
||||
WebRTCError,
|
||||
get_twilio_turn_credentials,
|
||||
)
|
||||
from gradio.utils import get_space
|
||||
from openai.types.beta.realtime import ResponseAudioTranscriptDoneEvent
|
||||
|
||||
load_dotenv()
|
||||
|
||||
cur_dir = Path(__file__).parent
|
||||
|
||||
SAMPLE_RATE = 24000
|
||||
|
||||
|
||||
class OpenAIHandler(AsyncStreamHandler):
|
||||
def __init__(
|
||||
self,
|
||||
) -> None:
|
||||
super().__init__(
|
||||
expected_layout="mono",
|
||||
output_sample_rate=SAMPLE_RATE,
|
||||
output_frame_size=480,
|
||||
input_sample_rate=SAMPLE_RATE,
|
||||
)
|
||||
self.connection = None
|
||||
self.output_queue = asyncio.Queue()
|
||||
|
||||
def copy(self):
|
||||
return OpenAIHandler()
|
||||
|
||||
async def start_up(
|
||||
self,
|
||||
):
|
||||
"""Connect to realtime API. Run forever in separate thread to keep connection open."""
|
||||
self.client = openai.AsyncOpenAI()
|
||||
try:
|
||||
async with self.client.beta.realtime.connect(
|
||||
model="gpt-4o-mini-realtime-preview-2024-12-17"
|
||||
) as conn:
|
||||
await conn.session.update(
|
||||
session={"turn_detection": {"type": "server_vad"}}
|
||||
)
|
||||
self.connection = conn
|
||||
async for event in self.connection:
|
||||
if event.type == "response.audio_transcript.done":
|
||||
await self.output_queue.put(AdditionalOutputs(event))
|
||||
if event.type == "response.audio.delta":
|
||||
await self.output_queue.put(
|
||||
(
|
||||
self.output_sample_rate,
|
||||
np.frombuffer(
|
||||
base64.b64decode(event.delta), dtype=np.int16
|
||||
).reshape(1, -1),
|
||||
),
|
||||
)
|
||||
except Exception:
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
raise WebRTCError(str(traceback.format_exc()))
|
||||
|
||||
async def receive(self, frame: tuple[int, np.ndarray]) -> None:
|
||||
if not self.connection:
|
||||
return
|
||||
try:
|
||||
_, array = frame
|
||||
array = array.squeeze()
|
||||
audio_message = base64.b64encode(array.tobytes()).decode("utf-8")
|
||||
await self.connection.input_audio_buffer.append(audio=audio_message) # type: ignore
|
||||
except Exception as e:
|
||||
# print traceback
|
||||
print(f"Error in receive: {str(e)}")
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
raise WebRTCError(str(traceback.format_exc()))
|
||||
|
||||
async def emit(self) -> tuple[int, np.ndarray] | AdditionalOutputs | None:
|
||||
return await self.output_queue.get()
|
||||
|
||||
def reset_state(self):
|
||||
"""Reset connection state for new recording session"""
|
||||
self.connection = None
|
||||
self.args_set.clear()
|
||||
|
||||
async def shutdown(self) -> None:
|
||||
if self.connection:
|
||||
await self.connection.close()
|
||||
self.reset_state()
|
||||
|
||||
|
||||
def update_chatbot(chatbot: list[dict], response: ResponseAudioTranscriptDoneEvent):
|
||||
chatbot.append({"role": "assistant", "content": response.transcript})
|
||||
return chatbot
|
||||
|
||||
|
||||
chatbot = gr.Chatbot(type="messages")
|
||||
latest_message = gr.Textbox(type="text", visible=False)
|
||||
stream = Stream(
|
||||
OpenAIHandler(),
|
||||
mode="send-receive",
|
||||
modality="audio",
|
||||
additional_inputs=[chatbot],
|
||||
additional_outputs=[chatbot],
|
||||
additional_outputs_handler=update_chatbot,
|
||||
rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
|
||||
concurrency_limit=5 if get_space() else None,
|
||||
time_limit=90 if get_space() else None,
|
||||
)
|
||||
|
||||
app = FastAPI()
|
||||
|
||||
stream.mount(app)
|
||||
|
||||
|
||||
@app.get("/")
|
||||
async def _():
|
||||
rtc_config = get_twilio_turn_credentials() if get_space() else None
|
||||
html_content = (cur_dir / "index.html").read_text()
|
||||
html_content = html_content.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config))
|
||||
return HTMLResponse(content=html_content)
|
||||
|
||||
|
||||
@app.get("/outputs")
|
||||
def _(webrtc_id: str):
|
||||
async def output_stream():
|
||||
import json
|
||||
|
||||
async for output in stream.output_stream(webrtc_id):
|
||||
s = json.dumps({"role": "assistant", "content": output.args[0].transcript})
|
||||
yield f"event: output\ndata: {s}\n\n"
|
||||
|
||||
return StreamingResponse(output_stream(), media_type="text/event-stream")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import os
|
||||
|
||||
if (mode := os.getenv("MODE")) == "UI":
|
||||
stream.ui.launch(server_port=7860)
|
||||
elif mode == "PHONE":
|
||||
stream.fastphone(host="0.0.0.0", port=7860)
|
||||
else:
|
||||
import uvicorn
|
||||
|
||||
uvicorn.run(app, host="0.0.0.0", port=7860)
|
||||
404
demo/talk_to_openai/index.html
Normal file
404
demo/talk_to_openai/index.html
Normal file
@@ -0,0 +1,404 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>OpenAI Real-Time Chat</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: "SF Pro Display", -apple-system, BlinkMacSystemFont, sans-serif;
|
||||
background-color: #0a0a0a;
|
||||
color: #ffffff;
|
||||
margin: 0;
|
||||
padding: 20px;
|
||||
height: 100vh;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
.container {
|
||||
max-width: 800px;
|
||||
margin: 0 auto;
|
||||
height: calc(100% - 100px);
|
||||
}
|
||||
|
||||
.logo {
|
||||
text-align: center;
|
||||
margin-bottom: 40px;
|
||||
}
|
||||
|
||||
.chat-container {
|
||||
border: 1px solid #333;
|
||||
padding: 20px;
|
||||
height: 90%;
|
||||
box-sizing: border-box;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.chat-messages {
|
||||
flex-grow: 1;
|
||||
overflow-y: auto;
|
||||
margin-bottom: 20px;
|
||||
padding: 10px;
|
||||
}
|
||||
|
||||
.message {
|
||||
margin-bottom: 20px;
|
||||
padding: 12px;
|
||||
border-radius: 4px;
|
||||
font-size: 16px;
|
||||
line-height: 1.5;
|
||||
}
|
||||
|
||||
.message.user {
|
||||
background-color: #1a1a1a;
|
||||
margin-left: 20%;
|
||||
}
|
||||
|
||||
.message.assistant {
|
||||
background-color: #262626;
|
||||
margin-right: 20%;
|
||||
}
|
||||
|
||||
.controls {
|
||||
text-align: center;
|
||||
margin-top: 20px;
|
||||
}
|
||||
|
||||
button {
|
||||
background-color: transparent;
|
||||
color: #ffffff;
|
||||
border: 1px solid #ffffff;
|
||||
padding: 12px 24px;
|
||||
font-family: inherit;
|
||||
font-size: 16px;
|
||||
cursor: pointer;
|
||||
transition: all 0.3s;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 1px;
|
||||
}
|
||||
|
||||
button:hover {
|
||||
border-width: 2px;
|
||||
transform: scale(1.02);
|
||||
box-shadow: 0 0 10px rgba(255, 255, 255, 0.2);
|
||||
}
|
||||
|
||||
#audio-output {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.icon-with-spinner {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 12px;
|
||||
min-width: 180px;
|
||||
}
|
||||
|
||||
.spinner {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
border: 2px solid #ffffff;
|
||||
border-top-color: transparent;
|
||||
border-radius: 50%;
|
||||
animation: spin 1s linear infinite;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
to {
|
||||
transform: rotate(360deg);
|
||||
}
|
||||
}
|
||||
|
||||
.pulse-container {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 12px;
|
||||
min-width: 180px;
|
||||
}
|
||||
|
||||
.pulse-circle {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
border-radius: 50%;
|
||||
background-color: #ffffff;
|
||||
opacity: 0.2;
|
||||
flex-shrink: 0;
|
||||
transform: translateX(-0%) scale(var(--audio-level, 1));
|
||||
transition: transform 0.1s ease;
|
||||
}
|
||||
|
||||
/* Add styles for toast notifications */
|
||||
.toast {
|
||||
position: fixed;
|
||||
top: 20px;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
padding: 16px 24px;
|
||||
border-radius: 4px;
|
||||
font-size: 14px;
|
||||
z-index: 1000;
|
||||
display: none;
|
||||
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
|
||||
}
|
||||
|
||||
.toast.error {
|
||||
background-color: #f44336;
|
||||
color: white;
|
||||
}
|
||||
|
||||
.toast.warning {
|
||||
background-color: #ffd700;
|
||||
color: black;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<!-- Add toast element after body opening tag -->
|
||||
<div id="error-toast" class="toast"></div>
|
||||
<div class="container">
|
||||
<div class="logo">
|
||||
<h1>OpenAI Real-Time Chat</h1>
|
||||
</div>
|
||||
<div class="chat-container">
|
||||
<div class="chat-messages" id="chat-messages"></div>
|
||||
</div>
|
||||
<div class="controls">
|
||||
<button id="start-button">Start Conversation</button>
|
||||
</div>
|
||||
</div>
|
||||
<audio id="audio-output"></audio>
|
||||
|
||||
<script>
|
||||
let peerConnection;
|
||||
let webrtc_id;
|
||||
const audioOutput = document.getElementById('audio-output');
|
||||
const startButton = document.getElementById('start-button');
|
||||
const chatMessages = document.getElementById('chat-messages');
|
||||
|
||||
let audioLevel = 0;
|
||||
let animationFrame;
|
||||
let audioContext, analyser, audioSource;
|
||||
|
||||
// Render the start/stop button to mirror the current WebRTC connection state.
function updateButtonState() {
    const button = document.getElementById('start-button');
    const state = peerConnection?.connectionState;

    if (state === 'connecting' || state === 'new') {
        button.innerHTML = `
            <div class="icon-with-spinner">
                <div class="spinner"></div>
                <span>Connecting...</span>
            </div>
        `;
    } else if (state === 'connected') {
        button.innerHTML = `
            <div class="pulse-container">
                <div class="pulse-circle"></div>
                <span>Stop Conversation</span>
            </div>
        `;
    } else {
        // No connection (or closed/failed): offer to start a new one.
        button.innerHTML = 'Start Conversation';
    }
}
|
||||
|
||||
// Drive the pulse-circle UI indicator from the microphone's live volume.
// Stores the AudioContext/analyser/source in module-level vars so stop() can
// tear them down.
function setupAudioVisualization(stream) {
    audioContext = new (window.AudioContext || window.webkitAudioContext)();
    analyser = audioContext.createAnalyser();
    audioSource = audioContext.createMediaStreamSource(stream);
    audioSource.connect(analyser);
    analyser.fftSize = 64; // small FFT: only a coarse loudness level is needed
    const dataArray = new Uint8Array(analyser.frequencyBinCount);

    function updateAudioLevel() {
        analyser.getByteFrequencyData(dataArray);
        const average = Array.from(dataArray).reduce((a, b) => a + b, 0) / dataArray.length;
        audioLevel = average / 255; // normalize byte bins to 0..1

        // Update CSS variable instead of rebuilding the button
        const pulseCircle = document.querySelector('.pulse-circle');
        if (pulseCircle) {
            pulseCircle.style.setProperty('--audio-level', 1 + audioLevel);
        }

        // Re-schedule; the handle is kept so stop() can cancel the loop.
        animationFrame = requestAnimationFrame(updateAudioLevel);
    }
    updateAudioLevel();
}
|
||||
|
||||
// Show a transient error toast for five seconds.
// fix: explicitly (re)set className — the slow-connection timer in
// setupWebRTC sets 'toast warning', and without a reset subsequent error
// messages stayed styled as yellow warnings instead of red errors.
function showError(message) {
    const toast = document.getElementById('error-toast');
    toast.className = 'toast error';
    toast.textContent = message;
    toast.style.display = 'block';

    // Hide toast after 5 seconds
    setTimeout(() => {
        toast.style.display = 'none';
    }, 5000);
}
|
||||
|
||||
// Negotiate a WebRTC session with the server: capture the mic, exchange SDP
// via POST /webrtc/offer, play remote audio, and subscribe to assistant
// transcripts over Server-Sent Events.
// fix: removed `isConnecting = true;` — it assigned an undeclared implicit
// global that nothing in the page ever read.
async function setupWebRTC() {
    const config = __RTC_CONFIGURATION__;
    peerConnection = new RTCPeerConnection(config);

    // If connecting takes >5s, hint that a VPN may be blocking TURN/STUN.
    const timeoutId = setTimeout(() => {
        const toast = document.getElementById('error-toast');
        toast.textContent = "Connection is taking longer than usual. Are you on a VPN?";
        toast.className = 'toast warning';
        toast.style.display = 'block';

        // Hide warning after 5 seconds
        setTimeout(() => {
            toast.style.display = 'none';
        }, 5000);
    }, 5000);

    try {
        const stream = await navigator.mediaDevices.getUserMedia({
            audio: true
        });

        setupAudioVisualization(stream);

        stream.getTracks().forEach(track => {
            peerConnection.addTrack(track, stream);
        });

        // Remote model audio is played through the hidden <audio> element.
        peerConnection.addEventListener('track', (evt) => {
            if (audioOutput.srcObject !== evt.streams[0]) {
                audioOutput.srcObject = evt.streams[0];
                audioOutput.play();
            }
        });

        // Server pushes error notifications over this data channel.
        const dataChannel = peerConnection.createDataChannel('text');
        dataChannel.onmessage = (event) => {
            const eventJson = JSON.parse(event.data);
            if (eventJson.type === "error") {
                showError(eventJson.message);
            }
        };

        const offer = await peerConnection.createOffer();
        await peerConnection.setLocalDescription(offer);

        // Wait for ICE gathering to complete so the offer carries all candidates.
        await new Promise((resolve) => {
            if (peerConnection.iceGatheringState === "complete") {
                resolve();
            } else {
                const checkState = () => {
                    if (peerConnection.iceGatheringState === "complete") {
                        peerConnection.removeEventListener("icegatheringstatechange", checkState);
                        resolve();
                    }
                };
                peerConnection.addEventListener("icegatheringstatechange", checkState);
            }
        });

        peerConnection.addEventListener('connectionstatechange', () => {
            console.log('connectionstatechange', peerConnection.connectionState);
            if (peerConnection.connectionState === 'connected') {
                // Connected in time: cancel/clear the VPN warning.
                clearTimeout(timeoutId);
                const toast = document.getElementById('error-toast');
                toast.style.display = 'none';
            }
            updateButtonState();
        });

        webrtc_id = Math.random().toString(36).substring(7);

        const response = await fetch('/webrtc/offer', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({
                sdp: peerConnection.localDescription.sdp,
                type: peerConnection.localDescription.type,
                webrtc_id: webrtc_id
            })
        });

        const serverResponse = await response.json();

        if (serverResponse.status === 'failed') {
            showError(serverResponse.meta.error === 'concurrency_limit_reached'
                ? `Too many connections. Maximum limit is ${serverResponse.meta.limit}`
                : serverResponse.meta.error);
            stop();
            return;
        }

        await peerConnection.setRemoteDescription(serverResponse);

        // Assistant transcripts arrive out-of-band via SSE keyed by webrtc_id.
        const eventSource = new EventSource('/outputs?webrtc_id=' + webrtc_id);
        eventSource.addEventListener("output", (event) => {
            const eventJson = JSON.parse(event.data);
            addMessage("assistant", eventJson.content);
        });
    } catch (err) {
        clearTimeout(timeoutId);
        console.error('Error setting up WebRTC:', err);
        showError('Failed to establish connection. Please try again.');
        stop();
    }
}
|
||||
|
||||
// Append a chat bubble for `role` ("user" or "assistant") and keep the
// message list scrolled to the newest entry.
function addMessage(role, content) {
    const bubble = document.createElement('div');
    bubble.classList.add('message', role);
    bubble.textContent = content;
    chatMessages.appendChild(bubble);
    chatMessages.scrollTop = chatMessages.scrollHeight;
}
|
||||
|
||||
// Tear down the visualization loop, audio graph, and peer connection,
// then reset the button and level indicator.
function stop() {
    if (animationFrame) {
        cancelAnimationFrame(animationFrame);
    }

    if (audioContext) {
        audioContext.close();
        audioContext = null;
        analyser = null;
        audioSource = null;
    }

    if (peerConnection) {
        // Stop transceivers and local tracks when the browser supports it.
        peerConnection.getTransceivers?.().forEach((transceiver) => {
            transceiver.stop?.();
        });
        peerConnection.getSenders?.().forEach((sender) => {
            sender.track?.stop?.();
        });
        console.log('closing');
        peerConnection.close();
    }

    updateButtonState();
    audioLevel = 0;
}
|
||||
|
||||
// One button toggles the session: connect if idle, hang up if connected.
startButton.addEventListener('click', () => {
    console.log('clicked');
    console.log(peerConnection, peerConnection?.connectionState);
    const connected = peerConnection && peerConnection.connectionState === 'connected';
    if (connected) {
        console.log('stopping');
        stop();
    } else {
        setupWebRTC();
    }
});
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
4
demo/talk_to_openai/requirements.txt
Normal file
4
demo/talk_to_openai/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
fastrtc[vad]
|
||||
openai
|
||||
twilio
|
||||
python-dotenv
|
||||
15
demo/talk_to_sambanova/README.md
Normal file
15
demo/talk_to_sambanova/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Talk to Sambanova
|
||||
emoji: 💻
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Llama 3.2 - SambaNova API
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|SAMBANOVA_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
15
demo/talk_to_sambanova/README_gradio.md
Normal file
15
demo/talk_to_sambanova/README_gradio.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Talk to Sambanova (Gradio)
|
||||
emoji: 💻
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Llama 3.2 - SambaNova API (Gradio)
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|SAMBANOVA_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
152
demo/talk_to_sambanova/app.py
Normal file
152
demo/talk_to_sambanova/app.py
Normal file
@@ -0,0 +1,152 @@
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
import openai
|
||||
from dotenv import load_dotenv
|
||||
from fastapi import FastAPI
|
||||
from fastapi.responses import HTMLResponse, StreamingResponse
|
||||
from fastrtc import (
|
||||
AdditionalOutputs,
|
||||
ReplyOnPause,
|
||||
Stream,
|
||||
WebRTCError,
|
||||
get_stt_model,
|
||||
get_twilio_turn_credentials,
|
||||
)
|
||||
from gradio.utils import get_space
|
||||
from pydantic import BaseModel
|
||||
|
||||
load_dotenv()
|
||||
|
||||
curr_dir = Path(__file__).parent
|
||||
|
||||
|
||||
client = openai.OpenAI(
|
||||
api_key=os.environ.get("SAMBANOVA_API_KEY"),
|
||||
base_url="https://api.sambanova.ai/v1",
|
||||
)
|
||||
stt_model = get_stt_model()
|
||||
|
||||
|
||||
def response(
    audio: tuple[int, np.ndarray],
    gradio_chatbot: list[dict] | None = None,
    conversation_state: list[dict] | None = None,
):
    """Handle one user turn: transcribe audio, query the LLM, emit UI updates.

    Args:
        audio: ``(sample_rate, samples)`` tuple from the WebRTC stream.
        gradio_chatbot: chat history rendered in the UI (None on first turn).
        conversation_state: plain message dicts sent to the SambaNova API.

    Yields:
        AdditionalOutputs with the updated ``(chatbot, state)`` pair — once
        immediately with the user's audio bubble, then again with the reply.

    Raises:
        WebRTCError: if the chat-completion request fails (the traceback is
            forwarded to the browser over the data channel).
    """
    gradio_chatbot = gradio_chatbot or []
    conversation_state = conversation_state or []

    text = stt_model.stt(audio)
    sample_rate, array = audio
    # Echo the user's own audio into the chat UI right away.
    gradio_chatbot.append(
        {"role": "user", "content": gr.Audio((sample_rate, array.squeeze()))}
    )
    yield AdditionalOutputs(gradio_chatbot, conversation_state)

    conversation_state.append({"role": "user", "content": text})

    try:
        completion = client.chat.completions.create(
            model="Meta-Llama-3.2-3B-Instruct",
            messages=conversation_state,  # type: ignore
            temperature=0.1,
            top_p=0.1,
        )
        # fix: renamed local from `response`, which shadowed this function.
        assistant_message = {
            "role": "assistant",
            "content": completion.choices[0].message.content,
        }
    except Exception as exc:
        import traceback

        traceback.print_exc()
        # fix: chain the original exception instead of discarding it.
        raise WebRTCError(traceback.format_exc()) from exc

    conversation_state.append(assistant_message)
    gradio_chatbot.append(assistant_message)

    yield AdditionalOutputs(gradio_chatbot, conversation_state)
|
||||
|
||||
|
||||
# Shared Gradio components: used both as inputs (current conversation) and
# outputs (updated conversation) of each ReplyOnPause invocation.
chatbot = gr.Chatbot(type="messages", value=[])
state = gr.State(value=[])
stream = Stream(
    ReplyOnPause(
        response,  # type: ignore
        input_sample_rate=16000,  # resample mic audio to 16 kHz for the STT model
    ),
    mode="send",  # audio flows client -> server only; replies go out via SSE
    modality="audio",
    additional_inputs=[chatbot, state],
    additional_outputs=[chatbot, state],
    # Keep only the freshly yielded chatbot/state values (a[2], a[3]) —
    # presumably the earlier args are the stale inputs; TODO confirm arg order.
    additional_outputs_handler=lambda *a: (a[2], a[3]),
    concurrency_limit=20 if get_space() else None,
    rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
)
|
||||
|
||||
app = FastAPI()
|
||||
stream.mount(app)
|
||||
|
||||
|
||||
class Message(BaseModel):
    """One chat turn as exchanged with the /input_hook endpoint."""

    # "user" or "assistant"
    role: str
    content: str
|
||||
|
||||
|
||||
class InputData(BaseModel):
    """Payload the browser POSTs to /input_hook before each model turn."""

    # Correlates the HTTP request with an active WebRTC stream.
    webrtc_id: str
    chatbot: list[Message]
    state: list[Message]
|
||||
|
||||
|
||||
@app.get("/")
|
||||
async def _():
|
||||
rtc_config = get_twilio_turn_credentials() if get_space() else None
|
||||
html_content = (curr_dir / "index.html").read_text()
|
||||
html_content = html_content.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config))
|
||||
return HTMLResponse(content=html_content)
|
||||
|
||||
|
||||
@app.post("/input_hook")
|
||||
async def _(data: InputData):
|
||||
body = data.model_dump()
|
||||
stream.set_input(data.webrtc_id, body["chatbot"], body["state"])
|
||||
|
||||
|
||||
def audio_to_base64(file_path):
    """Read an audio file and return it as a ``data:`` URI string.

    The format is hard-coded to WAV to match the clips Gradio writes.
    """
    audio_format = "wav"
    with open(file_path, "rb") as fh:
        payload = fh.read()
    encoded = base64.b64encode(payload).decode("utf-8")
    return f"data:audio/{audio_format};base64,{encoded}"
|
||||
|
||||
|
||||
@app.get("/outputs")
|
||||
async def _(webrtc_id: str):
|
||||
async def output_stream():
|
||||
async for output in stream.output_stream(webrtc_id):
|
||||
chatbot = output.args[0]
|
||||
state = output.args[1]
|
||||
data = {
|
||||
"message": state[-1],
|
||||
"audio": audio_to_base64(chatbot[-1]["content"].value["path"])
|
||||
if chatbot[-1]["role"] == "user"
|
||||
else None,
|
||||
}
|
||||
yield f"event: output\ndata: {json.dumps(data)}\n\n"
|
||||
|
||||
return StreamingResponse(output_stream(), media_type="text/event-stream")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import os
|
||||
|
||||
if (mode := os.getenv("MODE")) == "UI":
|
||||
stream.ui.launch(server_port=7860)
|
||||
elif mode == "PHONE":
|
||||
raise ValueError("Phone mode not supported")
|
||||
else:
|
||||
import uvicorn
|
||||
|
||||
uvicorn.run(app, host="0.0.0.0", port=7860)
|
||||
487
demo/talk_to_sambanova/index.html
Normal file
487
demo/talk_to_sambanova/index.html
Normal file
@@ -0,0 +1,487 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Talk to Sambanova</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
|
||||
background-color: #f8f9fa;
|
||||
color: #1a1a1a;
|
||||
margin: 0;
|
||||
padding: 20px;
|
||||
height: 100vh;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
.container {
|
||||
max-width: 800px;
|
||||
margin: 0 auto;
|
||||
height: 80%;
|
||||
}
|
||||
|
||||
.logo {
|
||||
text-align: center;
|
||||
margin-bottom: 40px;
|
||||
}
|
||||
|
||||
.chat-container {
|
||||
background: white;
|
||||
border-radius: 8px;
|
||||
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
|
||||
padding: 20px;
|
||||
height: 90%;
|
||||
box-sizing: border-box;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.chat-messages {
|
||||
flex-grow: 1;
|
||||
overflow-y: auto;
|
||||
margin-bottom: 20px;
|
||||
padding: 10px;
|
||||
}
|
||||
|
||||
.message {
|
||||
margin-bottom: 20px;
|
||||
padding: 12px;
|
||||
border-radius: 8px;
|
||||
font-size: 14px;
|
||||
line-height: 1.5;
|
||||
}
|
||||
|
||||
.message.user {
|
||||
background-color: #e9ecef;
|
||||
margin-left: 20%;
|
||||
}
|
||||
|
||||
.message.assistant {
|
||||
background-color: #f1f3f5;
|
||||
margin-right: 20%;
|
||||
}
|
||||
|
||||
.controls {
|
||||
text-align: center;
|
||||
margin-top: 20px;
|
||||
}
|
||||
|
||||
button {
|
||||
background-color: #0066cc;
|
||||
color: white;
|
||||
border: none;
|
||||
padding: 12px 24px;
|
||||
font-family: inherit;
|
||||
font-size: 14px;
|
||||
cursor: pointer;
|
||||
transition: all 0.3s;
|
||||
border-radius: 4px;
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
button:hover {
|
||||
background-color: #0052a3;
|
||||
}
|
||||
|
||||
#audio-output {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.icon-with-spinner {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 12px;
|
||||
min-width: 180px;
|
||||
}
|
||||
|
||||
.spinner {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
border: 2px solid #ffffff;
|
||||
border-top-color: transparent;
|
||||
border-radius: 50%;
|
||||
animation: spin 1s linear infinite;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
to {
|
||||
transform: rotate(360deg);
|
||||
}
|
||||
}
|
||||
|
||||
.pulse-container {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 12px;
|
||||
min-width: 180px;
|
||||
}
|
||||
|
||||
.pulse-circle {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
border-radius: 50%;
|
||||
background-color: #ffffff;
|
||||
opacity: 0.2;
|
||||
flex-shrink: 0;
|
||||
transform: translateX(-0%) scale(var(--audio-level, 1));
|
||||
transition: transform 0.1s ease;
|
||||
}
|
||||
|
||||
/* Add styles for typing indicator */
|
||||
.typing-indicator {
|
||||
padding: 8px;
|
||||
background-color: #f1f3f5;
|
||||
border-radius: 8px;
|
||||
margin-bottom: 10px;
|
||||
display: none;
|
||||
}
|
||||
|
||||
.dots {
|
||||
display: inline-flex;
|
||||
gap: 4px;
|
||||
}
|
||||
|
||||
.dot {
|
||||
width: 8px;
|
||||
height: 8px;
|
||||
background-color: #0066cc;
|
||||
border-radius: 50%;
|
||||
animation: pulse 1.5s infinite;
|
||||
opacity: 0.5;
|
||||
}
|
||||
|
||||
.dot:nth-child(2) {
|
||||
animation-delay: 0.5s;
|
||||
}
|
||||
|
||||
.dot:nth-child(3) {
|
||||
animation-delay: 1s;
|
||||
}
|
||||
|
||||
@keyframes pulse {
|
||||
|
||||
0%,
|
||||
100% {
|
||||
opacity: 0.5;
|
||||
transform: scale(1);
|
||||
}
|
||||
|
||||
50% {
|
||||
opacity: 1;
|
||||
transform: scale(1.2);
|
||||
}
|
||||
}
|
||||
|
||||
/* Add styles for toast notifications */
|
||||
.toast {
|
||||
position: fixed;
|
||||
top: 20px;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
padding: 16px 24px;
|
||||
border-radius: 4px;
|
||||
font-size: 14px;
|
||||
z-index: 1000;
|
||||
display: none;
|
||||
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
|
||||
}
|
||||
|
||||
.toast.error {
|
||||
background-color: #f44336;
|
||||
color: white;
|
||||
}
|
||||
|
||||
.toast.warning {
|
||||
background-color: #ffd700;
|
||||
color: black;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<!-- Add toast element after body opening tag -->
|
||||
<div id="error-toast" class="toast"></div>
|
||||
<div class="container">
|
||||
<div class="logo">
|
||||
<h1>Talk to Sambanova 🗣️</h1>
|
||||
<h2 style="font-size: 1.2em; color: #666; margin-top: 10px;">Speak to Llama 3.2 powered by Sambanova API
|
||||
</h2>
|
||||
</div>
|
||||
<div class="chat-container">
|
||||
<div class="chat-messages" id="chat-messages"></div>
|
||||
<div class="typing-indicator" id="typing-indicator">
|
||||
<div class="dots">
|
||||
<div class="dot"></div>
|
||||
<div class="dot"></div>
|
||||
<div class="dot"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="controls">
|
||||
<button id="start-button">Start Conversation</button>
|
||||
</div>
|
||||
</div>
|
||||
<audio id="audio-output"></audio>
|
||||
|
||||
<script>
|
||||
let peerConnection;
|
||||
let webrtc_id;
|
||||
const startButton = document.getElementById('start-button');
|
||||
const chatMessages = document.getElementById('chat-messages');
|
||||
|
||||
let audioLevel = 0;
|
||||
let animationFrame;
|
||||
let audioContext, analyser, audioSource;
|
||||
let messages = [];
|
||||
let eventSource;
|
||||
|
||||
// Keep the start/stop button in sync with the peer connection's state.
function updateButtonState() {
    const button = document.getElementById('start-button');
    const state = peerConnection?.connectionState;

    if (state === 'connecting' || state === 'new') {
        button.innerHTML = `
            <div class="icon-with-spinner">
                <div class="spinner"></div>
                <span>Connecting...</span>
            </div>
        `;
    } else if (state === 'connected') {
        button.innerHTML = `
            <div class="pulse-container">
                <div class="pulse-circle"></div>
                <span>Stop Conversation</span>
            </div>
        `;
    } else {
        button.innerHTML = 'Start Conversation';
    }
}
|
||||
|
||||
// Animate the pulse-circle indicator from the microphone's live volume.
// Handles are stored in module-level vars so stop() can release them.
function setupAudioVisualization(stream) {
    audioContext = new (window.AudioContext || window.webkitAudioContext)();
    analyser = audioContext.createAnalyser();
    audioSource = audioContext.createMediaStreamSource(stream);
    audioSource.connect(analyser);
    analyser.fftSize = 64; // coarse FFT — only a loudness estimate is needed
    const dataArray = new Uint8Array(analyser.frequencyBinCount);

    function updateAudioLevel() {
        analyser.getByteFrequencyData(dataArray);
        const average = Array.from(dataArray).reduce((a, b) => a + b, 0) / dataArray.length;
        audioLevel = average / 255; // normalize byte bins to 0..1

        // Scale the indicator via CSS variable rather than touching layout.
        const pulseCircle = document.querySelector('.pulse-circle');
        if (pulseCircle) {
            pulseCircle.style.setProperty('--audio-level', 1 + audioLevel);
        }

        animationFrame = requestAnimationFrame(updateAudioLevel);
    }
    updateAudioLevel();
}
|
||||
|
||||
// Display `message` in the fixed error toast for five seconds.
function showError(message) {
    const toast = document.getElementById('error-toast');
    toast.textContent = message;
    toast.className = 'toast error';
    toast.style.display = 'block';

    // Auto-dismiss after 5s.
    setTimeout(() => {
        toast.style.display = 'none';
    }, 5000);
}
|
||||
|
||||
// Route data-channel messages from the server:
//   "error"      -> surface in the toast
//   "send_input" -> server asks us to re-POST the current conversation
//   "log"        -> pause/response lifecycle hints driving the typing indicator
function handleMessage(event) {
    const eventJson = JSON.parse(event.data);
    const typingIndicator = document.getElementById('typing-indicator');

    if (eventJson.type === "error") {
        showError(eventJson.message);
    } else if (eventJson.type === "send_input") {
        // Push the browser-held chat history so the handler sees prior turns.
        fetch('/input_hook', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
            },
            body: JSON.stringify({
                webrtc_id: webrtc_id,
                chatbot: messages,
                state: messages
            })
        });
    } else if (eventJson.type === "log") {
        if (eventJson.data === "pause_detected") {
            // User stopped talking: show the "..." bubble while the model works.
            typingIndicator.style.display = 'block';
            chatMessages.scrollTop = chatMessages.scrollHeight;
        } else if (eventJson.data === "response_starting") {
            typingIndicator.style.display = 'none';
        }
    }
}
|
||||
|
||||
// Negotiate the WebRTC session: capture the mic, exchange SDP via
// POST /webrtc/offer, and subscribe to conversation updates over SSE.
async function setupWebRTC() {
    const config = __RTC_CONFIGURATION__;
    peerConnection = new RTCPeerConnection(config);

    // If connecting takes >5s, hint that a VPN may be blocking TURN/STUN.
    const timeoutId = setTimeout(() => {
        const toast = document.getElementById('error-toast');
        toast.textContent = "Connection is taking longer than usual. Are you on a VPN?";
        toast.className = 'toast warning';
        toast.style.display = 'block';

        // Hide warning after 5 seconds
        setTimeout(() => {
            toast.style.display = 'none';
        }, 5000);
    }, 5000);

    try {
        const stream = await navigator.mediaDevices.getUserMedia({
            audio: true
        });

        setupAudioVisualization(stream);

        stream.getTracks().forEach(track => {
            peerConnection.addTrack(track, stream);
        });

        // Server-to-client control messages (errors, send_input, log hints).
        const dataChannel = peerConnection.createDataChannel('text');
        dataChannel.onmessage = handleMessage;

        const offer = await peerConnection.createOffer();
        await peerConnection.setLocalDescription(offer);

        // Wait for ICE gathering to finish so the offer includes all candidates.
        await new Promise((resolve) => {
            if (peerConnection.iceGatheringState === "complete") {
                resolve();
            } else {
                const checkState = () => {
                    if (peerConnection.iceGatheringState === "complete") {
                        peerConnection.removeEventListener("icegatheringstatechange", checkState);
                        resolve();
                    }
                };
                peerConnection.addEventListener("icegatheringstatechange", checkState);
            }
        });

        peerConnection.addEventListener('connectionstatechange', () => {
            console.log('connectionstatechange', peerConnection.connectionState);
            if (peerConnection.connectionState === 'connected') {
                // Connected in time: cancel/hide the VPN warning.
                clearTimeout(timeoutId);
                const toast = document.getElementById('error-toast');
                toast.style.display = 'none';
            }
            updateButtonState();
        });

        webrtc_id = Math.random().toString(36).substring(7);

        const response = await fetch('/webrtc/offer', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({
                sdp: peerConnection.localDescription.sdp,
                type: peerConnection.localDescription.type,
                webrtc_id: webrtc_id
            })
        });

        const serverResponse = await response.json();

        if (serverResponse.status === 'failed') {
            showError(serverResponse.meta.error === 'concurrency_limit_reached'
                ? `Too many connections. Maximum limit is ${serverResponse.meta.limit}`
                : serverResponse.meta.error);
            stop();
            return;
        }

        await peerConnection.setRemoteDescription(serverResponse);

        // Conversation updates (user audio + assistant text) arrive via SSE;
        // the handle is module-level so stop() can close it.
        eventSource = new EventSource('/outputs?webrtc_id=' + webrtc_id);
        eventSource.addEventListener("output", (event) => {
            const eventJson = JSON.parse(event.data);
            console.log(eventJson);
            messages.push(eventJson.message);
            addMessage(eventJson.message.role, eventJson.audio ?? eventJson.message.content);
        });
    } catch (err) {
        clearTimeout(timeoutId);
        console.error('Error setting up WebRTC:', err);
        showError('Failed to establish connection. Please try again.');
        stop();
    }
}
|
||||
|
||||
// Append a chat bubble: user turns render as a playable <audio> clip
// (content is a data: URI), assistant turns as plain text.
function addMessage(role, content) {
    const bubble = document.createElement('div');
    bubble.classList.add('message', role);

    if (role === 'user') {
        const audio = document.createElement('audio');
        audio.controls = true;
        audio.src = content;
        bubble.appendChild(audio);
    } else {
        bubble.textContent = content;
    }

    chatMessages.appendChild(bubble);
    chatMessages.scrollTop = chatMessages.scrollHeight;
}
|
||||
|
||||
// Close the SSE feed, audio graph, and peer connection; reset UI state.
function stop() {
    if (eventSource) {
        eventSource.close();
        eventSource = null;
    }

    if (animationFrame) {
        cancelAnimationFrame(animationFrame);
    }

    if (audioContext) {
        audioContext.close();
        audioContext = null;
        analyser = null;
        audioSource = null;
    }

    if (peerConnection) {
        // Stop transceivers and local tracks where the browser supports it.
        peerConnection.getTransceivers?.().forEach((transceiver) => {
            transceiver.stop?.();
        });
        peerConnection.getSenders?.().forEach((sender) => {
            sender.track?.stop?.();
        });
        peerConnection.close();
    }

    updateButtonState();
    audioLevel = 0;
}
|
||||
|
||||
// Toggle the session: connect when idle, hang up when connected.
startButton.addEventListener('click', () => {
    const connected = peerConnection && peerConnection.connectionState === 'connected';
    if (connected) {
        stop();
    } else {
        setupWebRTC();
    }
});
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
4
demo/talk_to_sambanova/requirements.txt
Normal file
4
demo/talk_to_sambanova/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
fastrtc[vad, stt]
|
||||
python-dotenv
|
||||
openai
|
||||
twilio
|
||||
@@ -1,65 +0,0 @@
|
||||
import os
|
||||
|
||||
import cv2
|
||||
import gradio as gr
|
||||
from gradio_webrtc import WebRTC
|
||||
from twilio.rest import Client
|
||||
|
||||
account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
|
||||
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
|
||||
|
||||
if account_sid and auth_token:
|
||||
client = Client(account_sid, auth_token)
|
||||
|
||||
token = client.tokens.create()
|
||||
|
||||
rtc_configuration = {
|
||||
"iceServers": token.ice_servers,
|
||||
"iceTransportPolicy": "relay",
|
||||
}
|
||||
else:
|
||||
rtc_configuration = None
|
||||
|
||||
|
||||
def generation(input_video):
    """Yield vertically-flipped RGB frames from the uploaded video file.

    fix: the original looped on the *previous* read's success flag, so after
    the final frame `cap.read()` returns ``(False, None)`` and
    ``cv2.flip(None, 0)`` raised before the loop condition was re-checked.
    Also releases the capture handle when iteration ends.
    """
    cap = cv2.VideoCapture(input_video)
    try:
        while True:
            ok, frame = cap.read()
            if not ok:
                break  # end of stream — don't process a None frame

            # flip frame vertically
            frame = cv2.flip(frame, 0)
            display_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            yield display_frame
    finally:
        cap.release()
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
gr.HTML(
|
||||
"""
|
||||
<h1 style='text-align: center'>
|
||||
Video Streaming (Powered by WebRTC ⚡️)
|
||||
</h1>
|
||||
"""
|
||||
)
|
||||
with gr.Row():
|
||||
with gr.Column():
|
||||
input_video = gr.Video(sources="upload")
|
||||
with gr.Column():
|
||||
output_video = WebRTC(
|
||||
label="Video Stream",
|
||||
rtc_configuration=rtc_configuration,
|
||||
mode="receive",
|
||||
modality="video",
|
||||
)
|
||||
output_video.stream(
|
||||
fn=generation,
|
||||
inputs=[input_video],
|
||||
outputs=[output_video],
|
||||
trigger=input_video.upload,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
@@ -1,54 +0,0 @@
|
||||
import os
|
||||
|
||||
import cv2
|
||||
import gradio as gr
|
||||
from gradio_webrtc import WebRTC
|
||||
from twilio.rest import Client
|
||||
|
||||
account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
|
||||
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
|
||||
|
||||
if account_sid and auth_token:
|
||||
client = Client(account_sid, auth_token)
|
||||
|
||||
token = client.tokens.create()
|
||||
|
||||
rtc_configuration = {
|
||||
"iceServers": token.ice_servers,
|
||||
"iceTransportPolicy": "relay",
|
||||
}
|
||||
else:
|
||||
rtc_configuration = None
|
||||
|
||||
|
||||
def generation():
    """Stream raw BGR frames from a remote MP4 over HTTP.

    fix: the original yielded one final ``None`` frame after ``cap.read()``
    failed, because the success flag was only checked on the next loop pass.
    Also releases the capture handle when iteration ends.
    """
    url = "https://download.tsi.telecom-paristech.fr/gpac/dataset/dash/uhd/mux_sources/hevcds_720p30_2M.mp4"
    cap = cv2.VideoCapture(url)
    try:
        while True:
            ok, frame = cap.read()
            if not ok:
                break  # end of stream — never yield a None frame
            yield frame
    finally:
        cap.release()
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
gr.HTML(
|
||||
"""
|
||||
<h1 style='text-align: center'>
|
||||
Video Streaming (Powered by WebRTC ⚡️)
|
||||
</h1>
|
||||
"""
|
||||
)
|
||||
output_video = WebRTC(
|
||||
label="Video Stream",
|
||||
rtc_configuration=rtc_configuration,
|
||||
mode="receive",
|
||||
modality="video",
|
||||
)
|
||||
button = gr.Button("Start", variant="primary")
|
||||
output_video.stream(
|
||||
fn=generation, inputs=None, outputs=[output_video], trigger=button.click
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
@@ -1,102 +0,0 @@
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
|
||||
import cv2
|
||||
import gradio as gr
|
||||
from gradio_webrtc import AdditionalOutputs, WebRTC
|
||||
from huggingface_hub import hf_hub_download
|
||||
from inference import YOLOv10
|
||||
from twilio.rest import Client
|
||||
|
||||
# Configure the root logger to WARNING to suppress debug messages from other libraries
|
||||
logging.basicConfig(level=logging.WARNING)
|
||||
|
||||
# Create a console handler
|
||||
console_handler = logging.FileHandler("gradio_webrtc.log")
|
||||
console_handler.setLevel(logging.DEBUG)
|
||||
|
||||
# Create a formatter
|
||||
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
|
||||
console_handler.setFormatter(formatter)
|
||||
|
||||
# Configure the logger for your specific library
|
||||
logger = logging.getLogger("gradio_webrtc")
|
||||
logger.setLevel(logging.DEBUG)
|
||||
logger.addHandler(console_handler)
|
||||
|
||||
|
||||
model_file = hf_hub_download(
|
||||
repo_id="onnx-community/yolov10n", filename="onnx/model.onnx"
|
||||
)
|
||||
|
||||
model = YOLOv10(model_file)
|
||||
|
||||
account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
|
||||
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
|
||||
|
||||
if account_sid and auth_token:
|
||||
client = Client(account_sid, auth_token)
|
||||
|
||||
token = client.tokens.create()
|
||||
|
||||
rtc_configuration = {
|
||||
"iceServers": token.ice_servers,
|
||||
"iceTransportPolicy": "relay",
|
||||
}
|
||||
else:
|
||||
rtc_configuration = None
|
||||
|
||||
|
||||
def detection(image, conf_threshold=0.3):
|
||||
image = cv2.resize(image, (model.input_width, model.input_height))
|
||||
new_image = model.detect_objects(image, conf_threshold)
|
||||
return cv2.resize(new_image, (500, 500))
|
||||
|
||||
|
||||
css = """.my-group {max-width: 600px !important; max-height: 600 !important;}
|
||||
.my-column {display: flex !important; justify-content: center !important; align-items: center !important};"""
|
||||
|
||||
with gr.Blocks(css=css) as demo:
|
||||
gr.HTML(
|
||||
"""
|
||||
<h1 style='text-align: center'>
|
||||
YOLOv10 Webcam Stream (Powered by WebRTC ⚡️)
|
||||
</h1>
|
||||
"""
|
||||
)
|
||||
gr.HTML(
|
||||
"""
|
||||
<h3 style='text-align: center'>
|
||||
<a href='https://arxiv.org/abs/2405.14458' target='_blank'>arXiv</a> | <a href='https://github.com/THU-MIG/yolov10' target='_blank'>github</a>
|
||||
</h3>
|
||||
"""
|
||||
)
|
||||
with gr.Column(elem_classes=["my-column"]):
|
||||
with gr.Group(elem_classes=["my-group"]):
|
||||
image = WebRTC(
|
||||
label="Stream",
|
||||
rtc_configuration=rtc_configuration,
|
||||
mode="send-receive",
|
||||
modality="video",
|
||||
track_constraints={
|
||||
"width": {"exact": 800},
|
||||
"height": {"exact": 600},
|
||||
"aspectRatio": {"exact": 1.33333},
|
||||
},
|
||||
rtp_params={"degradationPreference": "maintain-resolution"},
|
||||
)
|
||||
conf_threshold = gr.Slider(
|
||||
label="Confidence Threshold",
|
||||
minimum=0.0,
|
||||
maximum=1.0,
|
||||
step=0.05,
|
||||
value=0.30,
|
||||
)
|
||||
number = gr.Number()
|
||||
|
||||
image.stream(
|
||||
fn=detection, inputs=[image, conf_threshold], outputs=[image], time_limit=90
|
||||
)
|
||||
|
||||
demo.launch()
|
||||
15
demo/webrtc_vs_websocket/README.md
Normal file
15
demo/webrtc_vs_websocket/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Webrtc Vs Websocket
|
||||
emoji: 🧪
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Compare Round Trip Times between WebRTC and Websockets
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|ELEVENLABS_API_KEY, secret|GROQ_API_KEY, secret|ANTHROPIC_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
147
demo/webrtc_vs_websocket/app.py
Normal file
147
demo/webrtc_vs_websocket/app.py
Normal file
@@ -0,0 +1,147 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import anthropic
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
from dotenv import load_dotenv
|
||||
from elevenlabs import ElevenLabs
|
||||
from fastapi import FastAPI
|
||||
from fastapi.responses import HTMLResponse, StreamingResponse
|
||||
from fastrtc import AdditionalOutputs, ReplyOnPause, Stream, get_twilio_turn_credentials
|
||||
from fastrtc.utils import aggregate_bytes_to_16bit, audio_to_bytes
|
||||
from gradio.utils import get_space
|
||||
from groq import Groq
|
||||
from pydantic import BaseModel
|
||||
|
||||
# Configure the root logger to WARNING to suppress debug messages from other libraries
|
||||
logging.basicConfig(level=logging.WARNING)
|
||||
|
||||
# Create a console handler
|
||||
console_handler = logging.FileHandler("gradio_webrtc.log")
|
||||
console_handler.setLevel(logging.DEBUG)
|
||||
|
||||
# Create a formatter
|
||||
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
|
||||
console_handler.setFormatter(formatter)
|
||||
|
||||
# Configure the logger for your specific library
|
||||
logger = logging.getLogger("fastrtc")
|
||||
logger.setLevel(logging.DEBUG)
|
||||
logger.addHandler(console_handler)
|
||||
|
||||
|
||||
load_dotenv()
|
||||
|
||||
groq_client = Groq()
|
||||
claude_client = anthropic.Anthropic()
|
||||
tts_client = ElevenLabs(api_key=os.environ["ELEVENLABS_API_KEY"])
|
||||
|
||||
curr_dir = Path(__file__).parent
|
||||
|
||||
|
||||
def response(
|
||||
audio: tuple[int, np.ndarray],
|
||||
chatbot: list[dict] | None = None,
|
||||
):
|
||||
chatbot = chatbot or []
|
||||
messages = [{"role": d["role"], "content": d["content"]} for d in chatbot]
|
||||
prompt = groq_client.audio.transcriptions.create(
|
||||
file=("audio-file.mp3", audio_to_bytes(audio)),
|
||||
model="whisper-large-v3-turbo",
|
||||
response_format="verbose_json",
|
||||
).text
|
||||
print("prompt", prompt)
|
||||
chatbot.append({"role": "user", "content": prompt})
|
||||
messages.append({"role": "user", "content": prompt})
|
||||
response = claude_client.messages.create(
|
||||
model="claude-3-5-haiku-20241022",
|
||||
max_tokens=512,
|
||||
messages=messages, # type: ignore
|
||||
)
|
||||
response_text = " ".join(
|
||||
block.text # type: ignore
|
||||
for block in response.content
|
||||
if getattr(block, "type", None) == "text"
|
||||
)
|
||||
chatbot.append({"role": "assistant", "content": response_text})
|
||||
yield AdditionalOutputs(chatbot)
|
||||
iterator = tts_client.text_to_speech.convert_as_stream(
|
||||
text=response_text,
|
||||
voice_id="JBFqnCBsd6RMkjVDRZzb",
|
||||
model_id="eleven_multilingual_v2",
|
||||
output_format="pcm_24000",
|
||||
)
|
||||
for chunk in aggregate_bytes_to_16bit(iterator):
|
||||
audio_array = np.frombuffer(chunk, dtype=np.int16).reshape(1, -1)
|
||||
yield (24000, audio_array, "mono")
|
||||
|
||||
|
||||
chatbot = gr.Chatbot(type="messages")
|
||||
stream = Stream(
|
||||
modality="audio",
|
||||
mode="send-receive",
|
||||
handler=ReplyOnPause(response),
|
||||
additional_outputs_handler=lambda a, b: b,
|
||||
additional_inputs=[chatbot],
|
||||
additional_outputs=[chatbot],
|
||||
rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
|
||||
concurrency_limit=20 if get_space() else None,
|
||||
)
|
||||
|
||||
|
||||
class Message(BaseModel):
|
||||
role: str
|
||||
content: str
|
||||
|
||||
|
||||
class InputData(BaseModel):
|
||||
webrtc_id: str
|
||||
chatbot: list[Message]
|
||||
|
||||
|
||||
app = FastAPI()
|
||||
|
||||
stream.mount(app)
|
||||
|
||||
|
||||
@app.get("/")
|
||||
async def _():
|
||||
rtc_config = get_twilio_turn_credentials() if get_space() else None
|
||||
html_content = (curr_dir / "index.html").read_text()
|
||||
html_content = html_content.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config))
|
||||
return HTMLResponse(content=html_content, status_code=200)
|
||||
|
||||
|
||||
@app.post("/input_hook")
|
||||
async def _(body: InputData):
|
||||
stream.set_input(body.webrtc_id, body.model_dump()["chatbot"])
|
||||
return {"status": "ok"}
|
||||
|
||||
|
||||
@app.get("/outputs")
|
||||
def _(webrtc_id: str):
|
||||
print("outputs", webrtc_id)
|
||||
|
||||
async def output_stream():
|
||||
async for output in stream.output_stream(webrtc_id):
|
||||
chatbot = output.args[0]
|
||||
yield f"event: output\ndata: {json.dumps(chatbot[-2])}\n\n"
|
||||
yield f"event: output\ndata: {json.dumps(chatbot[-1])}\n\n"
|
||||
|
||||
return StreamingResponse(output_stream(), media_type="text/event-stream")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import os
|
||||
|
||||
if (mode := os.getenv("MODE")) == "UI":
|
||||
stream.ui.launch(server_port=7860, server_name="0.0.0.0")
|
||||
elif mode == "PHONE":
|
||||
stream.fastphone(host="0.0.0.0", port=7860)
|
||||
else:
|
||||
import uvicorn
|
||||
|
||||
uvicorn.run(app, host="0.0.0.0", port=7860)
|
||||
630
demo/webrtc_vs_websocket/index.html
Normal file
630
demo/webrtc_vs_websocket/index.html
Normal file
@@ -0,0 +1,630 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>WebRTC vs WebSocket Benchmark</title>
|
||||
<script src="https://cdn.jsdelivr.net/npm/alawmulaw"></script>
|
||||
<style>
|
||||
body {
|
||||
font-family: system-ui, -apple-system, sans-serif;
|
||||
margin: 0;
|
||||
padding: 20px;
|
||||
background-color: #f5f5f5;
|
||||
}
|
||||
|
||||
.container {
|
||||
display: grid;
|
||||
grid-template-columns: 1fr 1fr;
|
||||
gap: 30px;
|
||||
max-width: 1400px;
|
||||
margin: 0 auto;
|
||||
}
|
||||
|
||||
.panel {
|
||||
background: white;
|
||||
border-radius: 12px;
|
||||
padding: 20px;
|
||||
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
|
||||
}
|
||||
|
||||
.chat-container {
|
||||
height: 400px;
|
||||
overflow-y: auto;
|
||||
border: 1px solid #e0e0e0;
|
||||
border-radius: 8px;
|
||||
padding: 15px;
|
||||
margin-bottom: 15px;
|
||||
}
|
||||
|
||||
.message {
|
||||
margin-bottom: 10px;
|
||||
padding: 8px 12px;
|
||||
border-radius: 8px;
|
||||
max-width: 80%;
|
||||
}
|
||||
|
||||
.message.user {
|
||||
background-color: #e3f2fd;
|
||||
margin-left: auto;
|
||||
}
|
||||
|
||||
.message.assistant {
|
||||
background-color: #f5f5f5;
|
||||
}
|
||||
|
||||
.metrics {
|
||||
margin-top: 15px;
|
||||
padding: 10px;
|
||||
background: #f8f9fa;
|
||||
border-radius: 8px;
|
||||
}
|
||||
|
||||
.metric {
|
||||
margin: 5px 0;
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
button {
|
||||
background-color: #1976d2;
|
||||
color: white;
|
||||
border: none;
|
||||
padding: 10px 20px;
|
||||
border-radius: 6px;
|
||||
cursor: pointer;
|
||||
font-size: 14px;
|
||||
transition: background-color 0.2s;
|
||||
}
|
||||
|
||||
button:hover {
|
||||
background-color: #1565c0;
|
||||
}
|
||||
|
||||
button:disabled {
|
||||
background-color: #bdbdbd;
|
||||
cursor: not-allowed;
|
||||
}
|
||||
|
||||
h2 {
|
||||
margin-top: 0;
|
||||
color: #1976d2;
|
||||
}
|
||||
|
||||
.visualizer {
|
||||
width: 100%;
|
||||
height: 100px;
|
||||
margin: 10px 0;
|
||||
background: #fafafa;
|
||||
border-radius: 8px;
|
||||
}
|
||||
|
||||
/* Add styles for disclaimer */
|
||||
.disclaimer {
|
||||
background-color: #fff3e0;
|
||||
padding: 15px;
|
||||
border-radius: 8px;
|
||||
margin-bottom: 20px;
|
||||
font-size: 14px;
|
||||
line-height: 1.5;
|
||||
max-width: 1400px;
|
||||
margin: 0 auto 20px auto;
|
||||
}
|
||||
|
||||
/* Update nav bar styles */
|
||||
.nav-bar {
|
||||
background-color: #f5f5f5;
|
||||
padding: 10px 20px;
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
|
||||
.nav-container {
|
||||
max-width: 1400px;
|
||||
margin: 0 auto;
|
||||
display: flex;
|
||||
gap: 10px;
|
||||
}
|
||||
|
||||
.nav-button {
|
||||
background-color: #1976d2;
|
||||
color: white;
|
||||
border: none;
|
||||
padding: 8px 16px;
|
||||
border-radius: 4px;
|
||||
cursor: pointer;
|
||||
text-decoration: none;
|
||||
font-size: 14px;
|
||||
transition: background-color 0.2s;
|
||||
}
|
||||
|
||||
.nav-button:hover {
|
||||
background-color: #1565c0;
|
||||
}
|
||||
|
||||
/* Add styles for toast notifications */
|
||||
.toast {
|
||||
position: fixed;
|
||||
top: 20px;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
padding: 16px 24px;
|
||||
border-radius: 4px;
|
||||
font-size: 14px;
|
||||
z-index: 1000;
|
||||
display: none;
|
||||
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
|
||||
}
|
||||
|
||||
.toast.error {
|
||||
background-color: #f44336;
|
||||
color: white;
|
||||
}
|
||||
|
||||
.toast.warning {
|
||||
background-color: #ffd700;
|
||||
color: black;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<nav class="nav-bar">
|
||||
<div class="nav-container">
|
||||
<a href="./webrtc/docs" class="nav-button">WebRTC Docs</a>
|
||||
<a href="./websocket/docs" class="nav-button">WebSocket Docs</a>
|
||||
<a href="./telephone/docs" class="nav-button">Telephone Docs</a>
|
||||
<a href="./ui" class="nav-button">UI</a>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<div class="disclaimer">
|
||||
This page compares the WebRTC Round-Trip-Time calculated from <code>getStats()</code> to the time taken to
|
||||
process a ping/pong response pattern over websockets. It may not be a gold standard benchmark. Both WebRTC and
|
||||
Websockets have their merits/advantages which is why FastRTC supports both. Artifacts in the WebSocket playback
|
||||
audio are due to gaps in my frontend processing code and not FastRTC web server.
|
||||
</div>
|
||||
|
||||
<div class="container">
|
||||
<div class="panel">
|
||||
<h2>WebRTC Connection</h2>
|
||||
<div id="webrtc-chat" class="chat-container"></div>
|
||||
<div id="webrtc-metrics" class="metrics">
|
||||
<div class="metric">RTT (Round Trip Time): <span id="webrtc-rtt">-</span></div>
|
||||
</div>
|
||||
<button id="webrtc-button">Connect WebRTC</button>
|
||||
</div>
|
||||
|
||||
<div class="panel">
|
||||
<h2>WebSocket Connection</h2>
|
||||
<div id="ws-chat" class="chat-container"></div>
|
||||
<div id="ws-metrics" class="metrics">
|
||||
<div class="metric">RTT (Round Trip Time): <span id="ws-rtt">0</span></div>
|
||||
</div>
|
||||
<button id="ws-button">Connect WebSocket</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<audio id="webrtc-audio" style="display: none;"></audio>
|
||||
<audio id="ws-audio" style="display: none;"></audio>
|
||||
|
||||
<div id="error-toast" class="toast"></div>
|
||||
|
||||
<script>
|
||||
// Shared utilities
|
||||
function generateId() {
|
||||
return Math.random().toString(36).substring(7);
|
||||
}
|
||||
|
||||
function sendInput(id) {
|
||||
|
||||
return function handleMessage(event) {
|
||||
const eventJson = JSON.parse(event.data);
|
||||
if (eventJson.type === "send_input") {
|
||||
fetch('/input_hook', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
webrtc_id: id,
|
||||
chatbot: chatHistoryWebRTC
|
||||
})
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let chatHistoryWebRTC = [];
|
||||
let chatHistoryWebSocket = [];
|
||||
|
||||
function addMessage(containerId, role, content) {
|
||||
const container = document.getElementById(containerId);
|
||||
const messageDiv = document.createElement('div');
|
||||
messageDiv.classList.add('message', role);
|
||||
messageDiv.textContent = content;
|
||||
container.appendChild(messageDiv);
|
||||
container.scrollTop = container.scrollHeight;
|
||||
if (containerId === 'webrtc-chat') {
|
||||
chatHistoryWebRTC.push({ role, content });
|
||||
} else {
|
||||
chatHistoryWebSocket.push({ role, content });
|
||||
}
|
||||
}
|
||||
|
||||
// WebRTC Implementation
|
||||
let webrtcPeerConnection;
|
||||
|
||||
// Add this function to collect RTT stats
|
||||
async function updateWebRTCStats() {
|
||||
if (!webrtcPeerConnection) return;
|
||||
|
||||
const stats = await webrtcPeerConnection.getStats();
|
||||
stats.forEach(report => {
|
||||
if (report.type === 'candidate-pair' && report.state === 'succeeded') {
|
||||
const rtt = report.currentRoundTripTime * 1000; // Convert to ms
|
||||
document.getElementById('webrtc-rtt').textContent = `${rtt.toFixed(2)}ms`;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async function setupWebRTC() {
|
||||
const button = document.getElementById('webrtc-button');
|
||||
button.textContent = "Stop";
|
||||
|
||||
const config = __RTC_CONFIGURATION__;
|
||||
webrtcPeerConnection = new RTCPeerConnection(config);
|
||||
const webrtcId = generateId();
|
||||
|
||||
const timeoutId = setTimeout(() => {
|
||||
const toast = document.getElementById('error-toast');
|
||||
toast.textContent = "Connection is taking longer than usual. Are you on a VPN?";
|
||||
toast.className = 'toast warning';
|
||||
toast.style.display = 'block';
|
||||
|
||||
// Hide warning after 5 seconds
|
||||
setTimeout(() => {
|
||||
toast.style.display = 'none';
|
||||
}, 5000);
|
||||
}, 5000);
|
||||
|
||||
try {
|
||||
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
|
||||
stream.getTracks().forEach(track => {
|
||||
webrtcPeerConnection.addTrack(track, stream);
|
||||
});
|
||||
|
||||
webrtcPeerConnection.addEventListener('track', (evt) => {
|
||||
const audio = document.getElementById('webrtc-audio');
|
||||
if (audio.srcObject !== evt.streams[0]) {
|
||||
audio.srcObject = evt.streams[0];
|
||||
audio.play();
|
||||
}
|
||||
});
|
||||
|
||||
const dataChannel = webrtcPeerConnection.createDataChannel('text');
|
||||
dataChannel.onmessage = sendInput(webrtcId);
|
||||
|
||||
const offer = await webrtcPeerConnection.createOffer();
|
||||
await webrtcPeerConnection.setLocalDescription(offer);
|
||||
|
||||
await new Promise((resolve) => {
|
||||
if (webrtcPeerConnection.iceGatheringState === "complete") {
|
||||
resolve();
|
||||
} else {
|
||||
const checkState = () => {
|
||||
if (webrtcPeerConnection.iceGatheringState === "complete") {
|
||||
webrtcPeerConnection.removeEventListener("icegatheringstatechange", checkState);
|
||||
resolve();
|
||||
}
|
||||
};
|
||||
webrtcPeerConnection.addEventListener("icegatheringstatechange", checkState);
|
||||
}
|
||||
});
|
||||
|
||||
const response = await fetch('/webrtc/offer', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
sdp: webrtcPeerConnection.localDescription.sdp,
|
||||
type: webrtcPeerConnection.localDescription.type,
|
||||
webrtc_id: webrtcId
|
||||
})
|
||||
});
|
||||
|
||||
const serverResponse = await response.json();
|
||||
await webrtcPeerConnection.setRemoteDescription(serverResponse);
|
||||
|
||||
// Setup event source for messages
|
||||
const eventSource = new EventSource('/outputs?webrtc_id=' + webrtcId);
|
||||
eventSource.addEventListener("output", (event) => {
|
||||
const eventJson = JSON.parse(event.data);
|
||||
addMessage('webrtc-chat', eventJson.role, eventJson.content);
|
||||
});
|
||||
|
||||
// Add periodic stats collection
|
||||
const statsInterval = setInterval(updateWebRTCStats, 1000);
|
||||
|
||||
// Store the interval ID on the connection
|
||||
webrtcPeerConnection.statsInterval = statsInterval;
|
||||
|
||||
webrtcPeerConnection.addEventListener('connectionstatechange', () => {
|
||||
if (webrtcPeerConnection.connectionState === 'connected') {
|
||||
clearTimeout(timeoutId);
|
||||
const toast = document.getElementById('error-toast');
|
||||
toast.style.display = 'none';
|
||||
}
|
||||
});
|
||||
|
||||
} catch (err) {
|
||||
clearTimeout(timeoutId);
|
||||
console.error('WebRTC setup error:', err);
|
||||
}
|
||||
}
|
||||
|
||||
function webrtc_stop() {
|
||||
if (webrtcPeerConnection) {
|
||||
// Clear the stats interval
|
||||
if (webrtcPeerConnection.statsInterval) {
|
||||
clearInterval(webrtcPeerConnection.statsInterval);
|
||||
}
|
||||
|
||||
// Close all tracks
|
||||
webrtcPeerConnection.getSenders().forEach(sender => {
|
||||
if (sender.track) {
|
||||
sender.track.stop();
|
||||
}
|
||||
});
|
||||
|
||||
webrtcPeerConnection.close();
|
||||
webrtcPeerConnection = null;
|
||||
|
||||
// Reset metrics display
|
||||
document.getElementById('webrtc-rtt').textContent = '-';
|
||||
}
|
||||
}
|
||||
|
||||
// WebSocket Implementation
|
||||
let webSocket;
|
||||
let wsMetrics = {
|
||||
pingStartTime: 0,
|
||||
rttValues: []
|
||||
};
|
||||
|
||||
// Load mu-law library
|
||||
|
||||
// Add load promise to track when the script is ready
|
||||
|
||||
|
||||
function resample(audioData, fromSampleRate, toSampleRate) {
|
||||
const ratio = fromSampleRate / toSampleRate;
|
||||
const newLength = Math.round(audioData.length / ratio);
|
||||
const result = new Float32Array(newLength);
|
||||
|
||||
for (let i = 0; i < newLength; i++) {
|
||||
const position = i * ratio;
|
||||
const index = Math.floor(position);
|
||||
const fraction = position - index;
|
||||
|
||||
if (index + 1 < audioData.length) {
|
||||
result[i] = audioData[index] * (1 - fraction) + audioData[index + 1] * fraction;
|
||||
} else {
|
||||
result[i] = audioData[index];
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
function convertToMulaw(audioData, sampleRate) {
|
||||
// Resample to 8000 Hz if needed
|
||||
if (sampleRate !== 8000) {
|
||||
audioData = resample(audioData, sampleRate, 8000);
|
||||
}
|
||||
|
||||
// Convert float32 [-1,1] to int16 [-32768,32767]
|
||||
const int16Data = new Int16Array(audioData.length);
|
||||
for (let i = 0; i < audioData.length; i++) {
|
||||
int16Data[i] = Math.floor(audioData[i] * 32768);
|
||||
}
|
||||
|
||||
// Convert to mu-law using the library
|
||||
return alawmulaw.mulaw.encode(int16Data);
|
||||
}
|
||||
|
||||
async function setupWebSocket() {
|
||||
const button = document.getElementById('ws-button');
|
||||
button.textContent = "Stop";
|
||||
|
||||
try {
|
||||
const stream = await navigator.mediaDevices.getUserMedia({
|
||||
audio: {
|
||||
"echoCancellation": true,
|
||||
"noiseSuppression": { "exact": true },
|
||||
"autoGainControl": { "exact": true },
|
||||
"sampleRate": { "ideal": 24000 },
|
||||
"sampleSize": { "ideal": 16 },
|
||||
"channelCount": { "exact": 1 },
|
||||
}
|
||||
});
|
||||
const wsId = generateId();
|
||||
wsMetrics.startTime = performance.now();
|
||||
|
||||
// Create audio context and analyser for visualization
|
||||
const audioContext = new AudioContext();
|
||||
const analyser = audioContext.createAnalyser();
|
||||
const source = audioContext.createMediaStreamSource(stream);
|
||||
source.connect(analyser);
|
||||
|
||||
// Connect to websocket endpoint
|
||||
webSocket = new WebSocket(`${window.location.protocol === 'https:' ? 'wss:' : 'ws:'}//${window.location.host}/websocket/offer`);
|
||||
|
||||
webSocket.onopen = () => {
|
||||
// Send initial start message
|
||||
webSocket.send(JSON.stringify({
|
||||
event: "start",
|
||||
websocket_id: wsId
|
||||
}));
|
||||
|
||||
// Setup audio processing
|
||||
const processor = audioContext.createScriptProcessor(2048, 1, 1);
|
||||
source.connect(processor);
|
||||
processor.connect(audioContext.destination);
|
||||
|
||||
processor.onaudioprocess = (e) => {
|
||||
const inputData = e.inputBuffer.getChannelData(0);
|
||||
const mulawData = convertToMulaw(inputData, audioContext.sampleRate);
|
||||
const base64Audio = btoa(String.fromCharCode.apply(null, mulawData));
|
||||
if (webSocket.readyState === WebSocket.OPEN) {
|
||||
webSocket.send(JSON.stringify({
|
||||
event: "media",
|
||||
media: {
|
||||
payload: base64Audio
|
||||
}
|
||||
}));
|
||||
}
|
||||
};
|
||||
|
||||
// Add ping interval
|
||||
webSocket.pingInterval = setInterval(() => {
|
||||
wsMetrics.pingStartTime = performance.now();
|
||||
webSocket.send(JSON.stringify({
|
||||
event: "ping"
|
||||
}));
|
||||
}, 500);
|
||||
};
|
||||
|
||||
// Setup audio output context
|
||||
const outputContext = new AudioContext({ sampleRate: 24000 });
|
||||
const sampleRate = 24000; // Updated to match server sample rate
|
||||
let audioQueue = [];
|
||||
let isPlaying = false;
|
||||
|
||||
webSocket.onmessage = (event) => {
|
||||
const data = JSON.parse(event.data);
|
||||
if (data?.type === "send_input") {
|
||||
console.log("sending input")
|
||||
fetch('/input_hook', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ webrtc_id: wsId, chatbot: chatHistoryWebSocket })
|
||||
});
|
||||
}
|
||||
if (data.event === "media") {
|
||||
// Process received audio
|
||||
const audioData = atob(data.media.payload);
|
||||
const mulawData = new Uint8Array(audioData.length);
|
||||
for (let i = 0; i < audioData.length; i++) {
|
||||
mulawData[i] = audioData.charCodeAt(i);
|
||||
}
|
||||
|
||||
// Convert mu-law to linear PCM
|
||||
const linearData = alawmulaw.mulaw.decode(mulawData);
|
||||
|
||||
// Create an AudioBuffer
|
||||
const audioBuffer = outputContext.createBuffer(1, linearData.length, sampleRate);
|
||||
const channelData = audioBuffer.getChannelData(0);
|
||||
|
||||
// Fill the buffer with the decoded data
|
||||
for (let i = 0; i < linearData.length; i++) {
|
||||
channelData[i] = linearData[i] / 32768.0;
|
||||
}
|
||||
|
||||
// Queue the audio buffer
|
||||
audioQueue.push(audioBuffer);
|
||||
|
||||
// Start playing if not already playing
|
||||
if (!isPlaying) {
|
||||
playNextBuffer();
|
||||
}
|
||||
}
|
||||
|
||||
// Add pong handler
|
||||
if (data.event === "pong") {
|
||||
const rtt = performance.now() - wsMetrics.pingStartTime;
|
||||
wsMetrics.rttValues.push(rtt);
|
||||
// Keep only last 20 values for running mean
|
||||
if (wsMetrics.rttValues.length > 20) {
|
||||
wsMetrics.rttValues.shift();
|
||||
}
|
||||
const avgRtt = wsMetrics.rttValues.reduce((a, b) => a + b, 0) / wsMetrics.rttValues.length;
|
||||
document.getElementById('ws-rtt').textContent = `${avgRtt.toFixed(2)}ms`;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
function playNextBuffer() {
|
||||
if (audioQueue.length === 0) {
|
||||
isPlaying = false;
|
||||
return;
|
||||
}
|
||||
|
||||
isPlaying = true;
|
||||
const bufferSource = outputContext.createBufferSource();
|
||||
bufferSource.buffer = audioQueue.shift();
|
||||
bufferSource.connect(outputContext.destination);
|
||||
|
||||
bufferSource.onended = playNextBuffer;
|
||||
bufferSource.start();
|
||||
}
|
||||
|
||||
const eventSource = new EventSource('/outputs?webrtc_id=' + wsId);
|
||||
eventSource.addEventListener("output", (event) => {
|
||||
console.log("ws output", event);
|
||||
const eventJson = JSON.parse(event.data);
|
||||
addMessage('ws-chat', eventJson.role, eventJson.content);
|
||||
});
|
||||
|
||||
} catch (err) {
|
||||
console.error('WebSocket setup error:', err);
|
||||
button.disabled = false;
|
||||
}
|
||||
}
|
||||
|
||||
function ws_stop() {
|
||||
if (webSocket) {
|
||||
webSocket.send(JSON.stringify({
|
||||
event: "stop"
|
||||
}));
|
||||
// Clear ping interval
|
||||
if (webSocket.pingInterval) {
|
||||
clearInterval(webSocket.pingInterval);
|
||||
}
|
||||
// Reset RTT display
|
||||
document.getElementById('ws-rtt').textContent = '-';
|
||||
wsMetrics.rttValues = [];
|
||||
|
||||
// Clear the stats interval
|
||||
if (webSocket.statsInterval) {
|
||||
clearInterval(webSocket.statsInterval);
|
||||
}
|
||||
webSocket.close();
|
||||
}
|
||||
}
|
||||
|
||||
// Event Listeners
|
||||
document.getElementById('webrtc-button').addEventListener('click', () => {
|
||||
const button = document.getElementById('webrtc-button');
|
||||
if (button.textContent === 'Connect WebRTC') {
|
||||
setupWebRTC();
|
||||
} else {
|
||||
webrtc_stop();
|
||||
button.textContent = 'Connect WebRTC';
|
||||
}
|
||||
});
|
||||
const ws_start_button = document.getElementById('ws-button')
|
||||
ws_start_button.addEventListener('click', () => {
|
||||
if (ws_start_button.textContent === 'Connect WebSocket') {
|
||||
setupWebSocket();
|
||||
ws_start_button.textContent = 'Stop';
|
||||
} else {
|
||||
ws_stop();
|
||||
ws_start_button.textContent = 'Connect WebSocket';
|
||||
}
|
||||
});
|
||||
document.addEventListener("beforeunload", () => {
|
||||
ws_stop();
|
||||
webrtc_stop();
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
6
demo/webrtc_vs_websocket/requirements.txt
Normal file
6
demo/webrtc_vs_websocket/requirements.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
fastrtc[vad]
|
||||
elevenlabs
|
||||
groq
|
||||
anthropic
|
||||
twilio
|
||||
python-dotenv
|
||||
15
demo/whisper_realtime/README.md
Normal file
15
demo/whisper_realtime/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
title: Whisper Realtime Transcription
|
||||
emoji: 👂
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
app_file: app.py
|
||||
pinned: false
|
||||
license: mit
|
||||
short_description: Transcribe audio in realtime with Whisper
|
||||
tags: [webrtc, websocket, gradio, secret|TWILIO_ACCOUNT_SID, secret|TWILIO_AUTH_TOKEN, secret|GROQ_API_KEY]
|
||||
---
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
22
demo/whisper_realtime/README_gradio.md
Normal file
22
demo/whisper_realtime/README_gradio.md
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
app_file: app.py
|
||||
colorFrom: purple
|
||||
colorTo: red
|
||||
emoji: 👂
|
||||
license: mit
|
||||
pinned: false
|
||||
sdk: gradio
|
||||
sdk_version: 5.16.0
|
||||
short_description: Transcribe audio in realtime - Gradio UI version
|
||||
tags:
|
||||
- webrtc
|
||||
- websocket
|
||||
- gradio
|
||||
- secret|TWILIO_ACCOUNT_SID
|
||||
- secret|TWILIO_AUTH_TOKEN
|
||||
- secret|GROQ_API_KEY
|
||||
title: Whisper Realtime Transcription (Gradio UI)
|
||||
---
|
||||
|
||||
|
||||
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
||||
86
demo/whisper_realtime/app.py
Normal file
86
demo/whisper_realtime/app.py
Normal file
@@ -0,0 +1,86 @@
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
import gradio as gr
|
||||
import numpy as np
|
||||
from dotenv import load_dotenv
|
||||
from fastapi import FastAPI
|
||||
from fastapi.responses import HTMLResponse, StreamingResponse
|
||||
from fastrtc import (
|
||||
AdditionalOutputs,
|
||||
ReplyOnPause,
|
||||
Stream,
|
||||
WebRTCError,
|
||||
audio_to_bytes,
|
||||
get_twilio_turn_credentials,
|
||||
)
|
||||
from gradio.utils import get_space
|
||||
from groq import AsyncClient
|
||||
|
||||
cur_dir = Path(__file__).parent
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
groq_client = AsyncClient()
|
||||
|
||||
|
||||
async def transcribe(audio: tuple[int, np.ndarray]):
|
||||
try:
|
||||
transcript = await groq_client.audio.transcriptions.create(
|
||||
file=("audio-file.mp3", audio_to_bytes(audio)),
|
||||
model="whisper-large-v3-turbo",
|
||||
response_format="verbose_json",
|
||||
)
|
||||
yield AdditionalOutputs(transcript.text)
|
||||
except Exception as e:
|
||||
raise WebRTCError(str(e))
|
||||
|
||||
|
||||
stream = Stream(
|
||||
ReplyOnPause(transcribe),
|
||||
modality="audio",
|
||||
mode="send",
|
||||
additional_outputs=[
|
||||
gr.Textbox(label="Transcript"),
|
||||
],
|
||||
additional_outputs_handler=lambda a, b: a + " " + b,
|
||||
rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
|
||||
concurrency_limit=5 if get_space() else None,
|
||||
time_limit=90 if get_space() else None,
|
||||
)
|
||||
|
||||
app = FastAPI()
|
||||
|
||||
stream.mount(app)
|
||||
|
||||
|
||||
@app.get("/transcript")
def _(webrtc_id: str):
    """Server-Sent Events feed of transcripts for one WebRTC connection.

    The browser opens an EventSource on this route (passing the webrtc_id it
    generated for the offer) and receives one ``output`` event per utterance.
    """

    async def output_stream():
        async for output in stream.output_stream(webrtc_id):
            transcript = output.args[0]
            # SSE frames are newline-delimited: a transcript containing
            # newlines must be split across multiple `data:` lines, otherwise
            # the frame is malformed and the browser drops the event.
            data = "\n".join(f"data: {line}" for line in (transcript.splitlines() or [""]))
            yield f"event: output\n{data}\n\n"

    return StreamingResponse(output_stream(), media_type="text/event-stream")
|
||||
|
||||
|
||||
@app.get("/")
def index():
    """Serve the single-page UI with the RTC configuration inlined."""
    # Only fetch Twilio TURN credentials when running on Spaces.
    rtc_config = get_twilio_turn_credentials() if get_space() else None
    page = (cur_dir / "index.html").read_text()
    # The template carries a __RTC_CONFIGURATION__ placeholder; substitute the
    # JSON-encoded config before handing the page to the browser.
    page = page.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config))
    return HTMLResponse(content=page)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import os

    # MODE selects how the demo is served:
    #   UI    -> built-in Gradio interface
    #   PHONE -> fastphone telephone gateway
    #   else  -> plain FastAPI app via uvicorn (serves index.html above)
    if (mode := os.getenv("MODE")) == "UI":
        stream.ui.launch(server_port=7860, server_name="0.0.0.0")
    elif mode == "PHONE":
        stream.fastphone(host="0.0.0.0", port=7860)
    else:
        import uvicorn

        uvicorn.run(app, host="0.0.0.0", port=7860)
|
||||
424
demo/whisper_realtime/index.html
Normal file
424
demo/whisper_realtime/index.html
Normal file
@@ -0,0 +1,424 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Real-time Whisper Transcription</title>
|
||||
<style>
|
||||
:root {
|
||||
--primary-gradient: linear-gradient(135deg, #f9a45c 0%, #e66465 100%);
|
||||
--background-cream: #faf8f5;
|
||||
--text-dark: #2d2d2d;
|
||||
}
|
||||
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
background-color: var(--background-cream);
|
||||
color: var(--text-dark);
|
||||
min-height: 100vh;
|
||||
}
|
||||
|
||||
.hero {
|
||||
background: var(--primary-gradient);
|
||||
color: white;
|
||||
padding: 2.5rem 2rem;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.hero h1 {
|
||||
font-size: 2.5rem;
|
||||
margin: 0;
|
||||
font-weight: 600;
|
||||
letter-spacing: -0.5px;
|
||||
}
|
||||
|
||||
.hero p {
|
||||
font-size: 1rem;
|
||||
margin-top: 0.5rem;
|
||||
opacity: 0.9;
|
||||
}
|
||||
|
||||
.container {
|
||||
max-width: 1000px;
|
||||
margin: 1.5rem auto;
|
||||
padding: 0 2rem;
|
||||
}
|
||||
|
||||
.transcript-container {
|
||||
border-radius: 8px;
|
||||
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.06);
|
||||
padding: 1.5rem;
|
||||
height: 300px;
|
||||
overflow-y: auto;
|
||||
margin-bottom: 1.5rem;
|
||||
border: 1px solid rgba(0, 0, 0, 0.1);
|
||||
}
|
||||
|
||||
.controls {
|
||||
text-align: center;
|
||||
margin: 1.5rem 0;
|
||||
}
|
||||
|
||||
button {
|
||||
background: var(--primary-gradient);
|
||||
color: white;
|
||||
border: none;
|
||||
padding: 10px 20px;
|
||||
font-size: 0.95rem;
|
||||
border-radius: 6px;
|
||||
cursor: pointer;
|
||||
transition: all 0.2s ease;
|
||||
font-weight: 500;
|
||||
min-width: 180px;
|
||||
}
|
||||
|
||||
button:hover {
|
||||
transform: translateY(-1px);
|
||||
box-shadow: 0 4px 12px rgba(230, 100, 101, 0.15);
|
||||
}
|
||||
|
||||
button:active {
|
||||
transform: translateY(0);
|
||||
}
|
||||
|
||||
/* Transcript text styling */
|
||||
.transcript-container p {
|
||||
margin: 0.4rem 0;
|
||||
padding: 0.6rem;
|
||||
background: var(--background-cream);
|
||||
border-radius: 4px;
|
||||
line-height: 1.4;
|
||||
font-size: 0.95rem;
|
||||
}
|
||||
|
||||
/* Custom scrollbar - made thinner */
|
||||
.transcript-container::-webkit-scrollbar {
|
||||
width: 6px;
|
||||
}
|
||||
|
||||
.transcript-container::-webkit-scrollbar-track {
|
||||
background: var(--background-cream);
|
||||
border-radius: 3px;
|
||||
}
|
||||
|
||||
.transcript-container::-webkit-scrollbar-thumb {
|
||||
background: #e66465;
|
||||
border-radius: 3px;
|
||||
opacity: 0.8;
|
||||
}
|
||||
|
||||
.transcript-container::-webkit-scrollbar-thumb:hover {
|
||||
background: #f9a45c;
|
||||
}
|
||||
|
||||
/* Add styles for toast notifications */
|
||||
.toast {
|
||||
position: fixed;
|
||||
top: 20px;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
padding: 16px 24px;
|
||||
border-radius: 4px;
|
||||
font-size: 14px;
|
||||
z-index: 1000;
|
||||
display: none;
|
||||
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
|
||||
}
|
||||
|
||||
.toast.error {
|
||||
background-color: #f44336;
|
||||
color: white;
|
||||
}
|
||||
|
||||
.toast.warning {
|
||||
background-color: #ffd700;
|
||||
color: black;
|
||||
}
|
||||
|
||||
/* Add styles for audio visualization */
|
||||
.icon-with-spinner {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 12px;
|
||||
min-width: 180px;
|
||||
}
|
||||
|
||||
.spinner {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
border: 2px solid white;
|
||||
border-top-color: transparent;
|
||||
border-radius: 50%;
|
||||
animation: spin 1s linear infinite;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.pulse-container {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 12px;
|
||||
min-width: 180px;
|
||||
}
|
||||
|
||||
.pulse-circle {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
border-radius: 50%;
|
||||
background-color: white;
|
||||
opacity: 0.2;
|
||||
flex-shrink: 0;
|
||||
transform: translateX(-0%) scale(var(--audio-level, 1));
|
||||
transition: transform 0.1s ease;
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
to {
|
||||
transform: rotate(360deg);
|
||||
}
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<!-- Add toast element after body opening tag -->
|
||||
<div id="error-toast" class="toast"></div>
|
||||
<div class="hero">
|
||||
<h1>Real-time Transcription</h1>
|
||||
<p>Powered by Groq and FastRTC</p>
|
||||
</div>
|
||||
|
||||
<div class="container">
|
||||
<div class="transcript-container" id="transcript"></div>
|
||||
<div class="controls">
|
||||
<button id="start-button">Start Recording</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// WebRTC session state for the single active recording.
let peerConnection;
let webrtc_id;
// Microphone level visualisation state (Web Audio API).
let audioContext, analyser, audioSource;
let audioLevel = 0;
let animationFrame;

const startButton = document.getElementById('start-button');
const transcriptDiv = document.getElementById('transcript');
|
||||
|
||||
// Show `message` in the toast, styled as an error, for five seconds.
function showError(message) {
    const toast = document.getElementById('error-toast');
    toast.textContent = message;
    // Explicitly (re)set the class: the connection-timeout path switches the
    // toast to 'toast warning', and without this reset a later error would
    // render with the warning style (or, on first use, with no colour at all,
    // since the element starts out with only the bare 'toast' class).
    toast.className = 'toast error';
    toast.style.display = 'block';

    // Hide toast after 5 seconds
    setTimeout(() => {
        toast.style.display = 'none';
    }, 5000);
}
|
||||
|
||||
// Handle WebRTC data-channel messages; server error payloads become a toast.
function handleMessage(event) {
    const payload = JSON.parse(event.data);
    if (payload.type === "error") {
        showError(payload.message);
    }
    console.log('Received message:', event.data);
}
|
||||
|
||||
// Reflect the current RTCPeerConnection state on the start/stop button.
function updateButtonState() {
    const state = peerConnection ? peerConnection.connectionState : null;

    if (state === 'connecting' || state === 'new') {
        // Handshake in progress: spinner.
        startButton.innerHTML = `
            <div class="icon-with-spinner">
                <div class="spinner"></div>
                <span>Connecting...</span>
            </div>
        `;
        return;
    }

    if (state === 'connected') {
        // Live: pulse circle driven by --audio-level.
        startButton.innerHTML = `
            <div class="pulse-container">
                <div class="pulse-circle"></div>
                <span>Stop Recording</span>
            </div>
        `;
        return;
    }

    // Idle / failed / closed: plain label (also the click handler's state flag).
    startButton.innerHTML = 'Start Recording';
}
|
||||
|
||||
// Drive the button's pulse circle from the microphone's live volume.
function setupAudioVisualization(stream) {
    const AudioCtx = window.AudioContext || window.webkitAudioContext;
    audioContext = new AudioCtx();
    analyser = audioContext.createAnalyser();
    analyser.fftSize = 64;
    audioSource = audioContext.createMediaStreamSource(stream);
    audioSource.connect(analyser);
    const levels = new Uint8Array(analyser.frequencyBinCount);

    const updateAudioLevel = () => {
        analyser.getByteFrequencyData(levels);
        let total = 0;
        for (const v of levels) total += v;
        // Normalise the 0-255 bin average into a 0-1 level.
        audioLevel = total / levels.length / 255;

        const pulseCircle = document.querySelector('.pulse-circle');
        if (pulseCircle) {
            pulseCircle.style.setProperty('--audio-level', 1 + audioLevel);
        }

        animationFrame = requestAnimationFrame(updateAudioLevel);
    };
    updateAudioLevel();
}
|
||||
|
||||
// Establish the WebRTC session: capture the mic, exchange SDP with the
// server's /webrtc/offer route, and subscribe to the transcript SSE feed.
// NOTE: the statement order below is load-bearing (tracks must be added
// before the offer; ICE gathering must finish before the offer is POSTed).
async function setupWebRTC() {
    // Placeholder substituted server-side with the JSON RTC configuration
    // (Twilio TURN credentials on Spaces, null locally).
    const config = __RTC_CONFIGURATION__;
    peerConnection = new RTCPeerConnection(config);

    // Warn (typically VPN-related) if the connection takes more than 5 s.
    const timeoutId = setTimeout(() => {
        const toast = document.getElementById('error-toast');
        toast.textContent = "Connection is taking longer than usual. Are you on a VPN?";
        toast.className = 'toast warning';
        toast.style.display = 'block';

        // Hide warning after 5 seconds
        setTimeout(() => {
            toast.style.display = 'none';
        }, 5000);
    }, 5000);

    try {
        const stream = await navigator.mediaDevices.getUserMedia({
            audio: true
        });

        setupAudioVisualization(stream);

        // Send the microphone track(s) to the server.
        stream.getTracks().forEach(track => {
            peerConnection.addTrack(track, stream);
        });

        // Add connection state change listener
        peerConnection.addEventListener('connectionstatechange', () => {
            console.log('connectionstatechange', peerConnection.connectionState);
            if (peerConnection.connectionState === 'connected') {
                clearTimeout(timeoutId);
                const toast = document.getElementById('error-toast');
                toast.style.display = 'none';
            }
            updateButtonState();
        });

        // Create data channel for messages
        const dataChannel = peerConnection.createDataChannel('text');
        dataChannel.onmessage = handleMessage;

        // Create and send offer
        const offer = await peerConnection.createOffer();
        await peerConnection.setLocalDescription(offer);

        // Wait for ICE candidate gathering to complete so the offer we POST
        // contains all candidates (no trickle ICE on this signalling path).
        await new Promise((resolve) => {
            if (peerConnection.iceGatheringState === "complete") {
                resolve();
            } else {
                const checkState = () => {
                    if (peerConnection.iceGatheringState === "complete") {
                        peerConnection.removeEventListener("icegatheringstatechange", checkState);
                        resolve();
                    }
                };
                peerConnection.addEventListener("icegatheringstatechange", checkState);
            }
        });

        // Random id correlating this connection with its /transcript feed.
        webrtc_id = Math.random().toString(36).substring(7);

        const response = await fetch('/webrtc/offer', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({
                sdp: peerConnection.localDescription.sdp,
                type: peerConnection.localDescription.type,
                webrtc_id: webrtc_id
            })
        });

        const serverResponse = await response.json();

        // Server-side rejection (e.g. concurrency limit): surface and abort.
        if (serverResponse.status === 'failed') {
            showError(serverResponse.meta.error === 'concurrency_limit_reached'
                ? `Too many connections. Maximum limit is ${serverResponse.meta.limit}`
                : serverResponse.meta.error);
            stop();
            startButton.textContent = 'Start Recording';
            return;
        }

        await peerConnection.setRemoteDescription(serverResponse);

        // Create event stream to receive transcripts
        const eventSource = new EventSource('/transcript?webrtc_id=' + webrtc_id);
        eventSource.addEventListener("output", (event) => {
            appendTranscript(event.data);
        });
    } catch (err) {
        clearTimeout(timeoutId);
        console.error('Error setting up WebRTC:', err);
        showError('Failed to establish connection. Please try again.');
        stop();
        startButton.textContent = 'Start Recording';
    }
}
|
||||
|
||||
// Append one transcript line and keep the pane scrolled to the newest entry.
function appendTranscript(text) {
    const entry = document.createElement('p');
    entry.textContent = text;
    transcriptDiv.appendChild(entry);
    transcriptDiv.scrollTop = transcriptDiv.scrollHeight;
}
|
||||
|
||||
// Tear down the recording session: visualisation loop, audio graph, tracks,
// and (after a short drain period) the peer connection itself.
function stop() {
    if (animationFrame) {
        cancelAnimationFrame(animationFrame);
    }
    if (audioContext) {
        audioContext.close();
        audioContext = null;
        analyser = null;
        audioSource = null;
    }
    if (peerConnection) {
        // Capture the connection locally so the deferred close below still
        // works after the module-level reference is cleared.
        const pc = peerConnection;
        if (pc.getTransceivers) {
            pc.getTransceivers().forEach(transceiver => {
                if (transceiver.stop) {
                    transceiver.stop();
                }
            });
        }

        if (pc.getSenders) {
            pc.getSenders().forEach(sender => {
                if (sender.track && sender.track.stop) sender.track.stop();
            });
        }

        // Give in-flight traffic a moment to drain before closing.
        setTimeout(() => {
            pc.close();
        }, 500);

        // Clear the reference now: the connection can still report
        // 'connected' until close() runs (and close() fires no state-change
        // event), so updateButtonState() below would otherwise leave the
        // button stuck on "Stop Recording".
        peerConnection = null;
    }
    audioLevel = 0;
    updateButtonState();
}
|
||||
|
||||
// Toggle recording: the button's plain-text label doubles as the state flag.
startButton.addEventListener('click', () => {
    const idle = startButton.textContent === 'Start Recording';
    if (idle) {
        setupWebRTC();
    } else {
        stop();
    }
});
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
4
demo/whisper_realtime/requirements.txt
Normal file
4
demo/whisper_realtime/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
fastrtc[vad]
|
||||
groq
|
||||
python-dotenv
|
||||
twilio
|
||||
Reference in New Issue
Block a user