Mirror of https://github.com/HumanAIGC-Engineering/gradio-webrtc.git (synced 2026-02-05 09:59:22 +08:00)

.gitignore (vendored, 4 changes)
@@ -11,4 +11,6 @@ __tmp/*
 node_modules
 backend/**/templates/
 demo/MobileNetSSD_deploy.caffemodel
 demo/MobileNetSSD_deploy.prototxt.txt
+.DS_Store
+test/

README.md (69 changes)
@@ -1,14 +1,3 @@
----
-tags: [gradio-custom-component, Video, streaming, webrtc, realtime]
-title: gradio_webrtc
-short_description: Stream images in realtime with webrtc
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-pinned: false
-app_file: space.py
----
-
 <h1 style='text-align: center; margin-bottom: 1rem'> Gradio WebRTC ⚡️ </h1>
 
 <div style="display: flex; flex-direction: row; justify-content: center">

@@ -30,15 +19,15 @@ pip install gradio_webrtc
 1. [Object Detection from Webcam with YOLOv10](https://huggingface.co/spaces/freddyaboulton/webrtc-yolov10n) 📷
 2. [Streaming Object Detection from Video with RT-DETR](https://huggingface.co/spaces/freddyaboulton/rt-detr-object-detection-webrtc) 🎥
 3. [Text-to-Speech](https://huggingface.co/spaces/freddyaboulton/parler-tts-streaming-webrtc) 🗣️
 4. [Conversational AI](https://huggingface.co/spaces/freddyaboulton/omni-mini-webrtc) 🤖🗣️
 
 ## Usage
 
 The WebRTC component supports the following four use cases:
-1. Streaming video from the user webcam to the server and back
-2. Streaming Video from the server to the client
-3. Streaming Audio from the server to the client
-
-Streaming Audio from client to the server and back (conversational AI) is not supported yet.
+1. [Streaming video from the user webcam to the server and back](#h-streaming-video-from-the-user-webcam-to-the-server-and-back)
+2. [Streaming Video from the server to the client](#h-streaming-video-from-the-server-to-the-client)
+3. [Streaming Audio from the server to the client](#h-streaming-audio-from-the-server-to-the-client)
+4. [Streaming Audio from the client to the server and back (conversational AI)](#h-conversational-ai)
 
 
 ## Streaming Video from the User Webcam to the Server and Back

@@ -78,7 +67,7 @@ as a **numpy array** and returns the processed frame also as a **numpy array**.
 * The `inputs` parameter should be a list where the first element is the WebRTC component. The only output allowed is the WebRTC component.
 * The `time_limit` parameter is the maximum time in seconds the video stream will run. If the time limit is reached, the video stream will stop.
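
For illustration, a complete handler for this mode only needs to map one numpy frame to another. A minimal sketch, assuming OpenCV is installed (the Gaussian blur is an arbitrary stand-in for real detection code):

```python
import cv2
import numpy as np


def process_frame(image: np.ndarray, conf_threshold: float = 0.3) -> np.ndarray:
    # Placeholder per-frame processing; a real app would run a detector here
    # and use conf_threshold to filter its predictions.
    return cv2.GaussianBlur(image, (15, 15), 0)
```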
 
-## Streaming Video from the User Webcam to the Server and Back
+## Streaming Video from the server to the client
 
 ```python
 import gradio as gr

@@ -143,6 +132,52 @@ with gr.Blocks() as demo:
 * The numpy array should be of shape (1, num_samples).
 * The `outputs` parameter should be a list with the WebRTC component as the only element.
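
To make the expected format concrete, here is a minimal sketch of a generator that yields one-second chunks of a 440 Hz tone as (sample_rate, array) tuples with the required (1, num_samples) shape (the rate and pitch are arbitrary choices):

```python
import numpy as np


def tone_generator(num_chunks: int = 5, sample_rate: int = 24000):
    t = np.linspace(0, 1, sample_rate, endpoint=False)
    samples = (np.sin(2 * np.pi * 440 * t) * 32767).astype(np.int16)
    for _ in range(num_chunks):
        # One channel of int16 samples: shape (1, num_samples).
        yield (sample_rate, samples.reshape(1, -1))
```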
 
+## Conversational AI
+
+```python
+import gradio as gr
+import numpy as np
+from gradio_webrtc import WebRTC, StreamHandler
+from queue import Queue
+import time
+
+
+class EchoHandler(StreamHandler):
+    def __init__(self) -> None:
+        super().__init__()
+        self.queue = Queue()
+
+    def receive(self, frame: tuple[int, np.ndarray] | np.ndarray) -> None:
+        self.queue.put(frame)
+
+    def emit(self) -> None:
+        return self.queue.get()
+
+
+with gr.Blocks() as demo:
+    with gr.Column():
+        with gr.Group():
+            audio = WebRTC(
+                label="Stream",
+                rtc_configuration=None,
+                mode="send-receive",
+                modality="audio",
+            )
+
+        audio.stream(fn=EchoHandler(), inputs=[audio], outputs=[audio], time_limit=15)
+
+
+if __name__ == "__main__":
+    demo.launch()
+```
+
+* Instead of passing a function to the `stream` event's `fn` parameter, pass a `StreamHandler` implementation. The `StreamHandler` above simply echoes the audio back to the client.
+* The `StreamHandler` class has two methods: `receive` and `emit`. The `receive` method is called when a new frame is received from the client, and the `emit` method returns the next frame to send to the client.
+* An audio frame is represented as a tuple of (frame_rate, audio_samples) where `audio_samples` is a numpy array of shape (num_channels, num_samples).
+* You can also specify the audio layout ("mono" or "stereo") in the `emit` method by returning it as the third element of the tuple. If not specified, the default is "mono".
+* The `time_limit` parameter is the maximum time in seconds the conversation will run. If the time limit is reached, the audio stream will stop.
+* The `emit` method SHOULD NOT block. If a frame is not ready to be sent, the method should return None.
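
Note that the `EchoHandler` above technically bends this rule, since `Queue.get()` blocks until a frame arrives. A non-blocking variant is a small change (a sketch using only the standard library):

```python
from queue import Empty, Queue


class NonBlockingEchoHandler(StreamHandler):
    def __init__(self) -> None:
        super().__init__()
        self.queue = Queue()

    def receive(self, frame) -> None:
        self.queue.put(frame)

    def emit(self):
        try:
            return self.queue.get_nowait()
        except Empty:
            # No frame ready yet; returning None lets the stream skip this cycle.
            return None
```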
+
+## Deployment
+
+When deploying in a cloud environment (like Hugging Face Spaces, EC2, etc.), you need to set up a TURN server to relay the WebRTC traffic.
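
The README's full Twilio example appears later in this diff, inside demo/app.py. The essential wiring is a sketch like this, assuming the twilio package is installed and the TWILIO_ACCOUNT_SID and TWILIO_AUTH_TOKEN environment variables are set:

```python
import os

from twilio.rest import Client

client = Client(os.environ["TWILIO_ACCOUNT_SID"], os.environ["TWILIO_AUTH_TOKEN"])
token = client.tokens.create()  # short-lived credentials that include TURN servers

rtc_configuration = {
    "iceServers": token.ice_servers,
    "iceTransportPolicy": "relay",  # force media through the TURN relay
}
```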

@@ -1,3 +1,3 @@
-from .webrtc import WebRTC
+from .webrtc import StreamHandler, WebRTC
 
-__all__ = ["WebRTC"]
+__all__ = ["StreamHandler", "WebRTC"]

@@ -1,8 +1,6 @@
 import asyncio
 import fractions
 import logging
-import threading
-import time
 from typing import Callable
 
 import av

@@ -13,56 +11,71 @@ logger = logging.getLogger(__name__)
 AUDIO_PTIME = 0.020
 
 
-def player_worker_decode(
-    loop,
-    callable: Callable,
-    stream,
+async def player_worker_decode(
+    next_frame: Callable,
     queue: asyncio.Queue,
-    throttle_playback: bool,
-    thread_quit: threading.Event,
+    thread_quit: asyncio.Event,
+    quit_on_none: bool = False,
+    sample_rate: int = 48000,
+    frame_size: int = int(48000 * AUDIO_PTIME),
 ):
-    audio_sample_rate = 48000
     audio_samples = 0
-    audio_time_base = fractions.Fraction(1, audio_sample_rate)
-    audio_resampler = av.AudioResampler(
+    audio_time_base = fractions.Fraction(1, sample_rate)
+    audio_resampler = av.AudioResampler(  # type: ignore
         format="s16",
         layout="stereo",
-        rate=audio_sample_rate,
-        frame_size=int(audio_sample_rate * AUDIO_PTIME),
+        rate=sample_rate,
+        frame_size=frame_size,
     )
-
-    frame_time = None
-    start_time = time.time()
-    generator = None
-
     while not thread_quit.is_set():
-        if stream.latest_args == "not_set":
-            continue
-        if generator is None:
-            generator = callable(*stream.latest_args)
         try:
-            frame = next(generator)
-        except Exception as exc:
-            if isinstance(exc, StopIteration):
-                logger.debug("Stopping audio stream")
-                asyncio.run_coroutine_threadsafe(queue.put(None), loop)
-                thread_quit.set()
-            break
-        # read up to 1 second ahead
-        if throttle_playback:
-            elapsed_time = time.time() - start_time
-            if frame_time and frame_time > elapsed_time + 1:
-                time.sleep(0.1)
-        sample_rate, audio_array = frame
-        format = "s16" if audio_array.dtype == "int16" else "fltp"
-        frame = av.AudioFrame.from_ndarray(audio_array, format=format, layout="mono")
-        frame.sample_rate = sample_rate
-        for frame in audio_resampler.resample(frame):
-            # fix timestamps
-            frame.pts = audio_samples
-            frame.time_base = audio_time_base
-            audio_samples += frame.samples
-            frame_time = frame.time
-            asyncio.run_coroutine_threadsafe(queue.put(frame), loop)
+            # Get next frame
+            frame = await asyncio.wait_for(next_frame(), timeout=5)
+            if frame is None:
+                if quit_on_none:
+                    await queue.put(None)
+                    break
+                continue
+
+            if len(frame) == 2:
+                sample_rate, audio_array = frame
+                layout = "mono"
+            elif len(frame) == 3:
+                sample_rate, audio_array, layout = frame
+
+            logger.debug(
+                "received array with shape %s sample rate %s layout %s",
+                audio_array.shape,
+                sample_rate,
+                layout,
+            )
+            format = "s16" if audio_array.dtype == "int16" else "fltp"
+
+            # Convert to audio frame and resample
+            # This runs in the same timeout context
+            frame = av.AudioFrame.from_ndarray(
+                audio_array, format=format, layout=layout
+            )
+            frame.sample_rate = sample_rate
+
+            for processed_frame in audio_resampler.resample(frame):
+                processed_frame.pts = audio_samples
+                processed_frame.time_base = audio_time_base
+                audio_samples += processed_frame.samples
+                await queue.put(processed_frame)
+                logger.debug("Queue size utils.py: %s", queue.qsize())
+
+        except (TimeoutError, asyncio.TimeoutError):
+            logger.warning(
+                "Timeout in frame processing cycle after %s seconds - resetting", 5
+            )
+            continue
+        except Exception as e:
+            import traceback
+
+            exec = traceback.format_exc()
+            logger.debug("traceback %s", exec)
+            logger.error("Error processing frame: %s", str(e))
+            continue
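
As the new signature shows, the worker now awaits a `next_frame` callable instead of pulling from a generator itself. A blocking `emit`-style callable can be adapted by pushing each call onto an executor, which is exactly how the component wires it up later in this diff. A sketch, where `handler` stands for any object exposing `emit()`, `output_sample_rate`, and `output_frame_size`:

```python
import asyncio
import functools


async def run_worker(handler, queue: asyncio.Queue) -> None:
    loop = asyncio.get_running_loop()
    # Each call to next_frame() runs the blocking emit() on a thread pool and
    # returns an awaitable, matching the contract of player_worker_decode.
    next_frame = functools.partial(loop.run_in_executor, None, handler.emit)
    await player_worker_decode(
        next_frame,
        queue,
        asyncio.Event(),
        quit_on_none=False,
        sample_rate=handler.output_sample_rate,
        frame_size=handler.output_frame_size,
    )
```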

@@ -3,21 +3,26 @@
 from __future__ import annotations
 
 import asyncio
+import functools
 import logging
 import threading
 import time
+import traceback
+from abc import ABC, abstractmethod
 from collections.abc import Callable
 from typing import TYPE_CHECKING, Any, Generator, Literal, Sequence, cast
 
+import anyio.to_thread
 import av
 import numpy as np
 from aiortc import (
     AudioStreamTrack,
     MediaStreamTrack,
     RTCPeerConnection,
     RTCSessionDescription,
     VideoStreamTrack,
 )
-from aiortc.contrib.media import MediaRelay, VideoFrame  # type: ignore
+from aiortc.contrib.media import AudioFrame, MediaRelay, VideoFrame  # type: ignore
 from aiortc.mediastreams import MediaStreamError
 from gradio import wasm_utils
 from gradio.components.base import Component, server

@@ -47,7 +52,7 @@ class VideoCallback(VideoStreamTrack):
 
     def __init__(
         self,
-        track,
+        track: MediaStreamTrack,
         event_handler: Callable,
     ) -> None:
         super().__init__()  # don't forget this!

@@ -72,7 +77,7 @@ class VideoCallback(VideoStreamTrack):
     async def recv(self):
         try:
             try:
-                frame = await self.track.recv()
+                frame = cast(VideoFrame, await self.track.recv())
             except MediaStreamError:
                 return
             frame_array = frame.to_ndarray(format="bgr24")

@@ -95,9 +100,127 @@ class VideoCallback(VideoStreamTrack):
 
             return new_frame
         except Exception as e:
-            logger.debug(e)
+            logger.debug("exception %s", e)
             exec = traceback.format_exc()
-            logger.debug(exec)
+            logger.debug("traceback %s", exec)
 
 
+class StreamHandler(ABC):
+    def __init__(
+        self,
+        expected_layout: Literal["mono", "stereo"] = "mono",
+        output_sample_rate: int = 24000,
+        output_frame_size: int = 960,
+    ) -> None:
+        self.expected_layout = expected_layout
+        self.output_sample_rate = output_sample_rate
+        self.output_frame_size = output_frame_size
+        self._resampler = None
+
+    def resample(self, frame: AudioFrame) -> Generator[AudioFrame, None, None]:
+        if self._resampler is None:
+            self._resampler = av.AudioResampler(  # type: ignore
+                format="s16",
+                layout=self.expected_layout,
+                rate=frame.sample_rate,
+                frame_size=frame.samples,
+            )
+        yield from self._resampler.resample(frame)
+
+    @abstractmethod
+    def receive(self, frame: tuple[int, np.ndarray] | np.ndarray) -> None:
+        pass
+
+    @abstractmethod
+    def emit(self) -> None:
+        pass
+
+
+class AudioCallback(AudioStreamTrack):
+    kind = "audio"
+
+    def __init__(
+        self,
+        track: MediaStreamTrack,
+        event_handler: StreamHandler,
+    ) -> None:
+        self.track = track
+        self.event_handler = event_handler
+        self.current_timestamp = 0
+        self.latest_args: str | list[Any] = "not_set"
+        self.queue = asyncio.Queue()
+        self.thread_quit = asyncio.Event()
+        self._start: float | None = None
+        self.has_started = False
+        self.last_timestamp = 0
+        super().__init__()
+
+    async def process_input_frames(self) -> None:
+        while not self.thread_quit.is_set():
+            try:
+                frame = cast(AudioFrame, await self.track.recv())
+                for frame in self.event_handler.resample(frame):
+                    numpy_array = frame.to_ndarray()
+                    await anyio.to_thread.run_sync(
+                        self.event_handler.receive, (frame.sample_rate, numpy_array)
+                    )
+            except MediaStreamError:
+                logger.debug("MediaStreamError in process_input_frames")
+                break
+
+    def start(self):
+        if not self.has_started:
+            loop = asyncio.get_running_loop()
+            callable = functools.partial(
+                loop.run_in_executor, None, self.event_handler.emit
+            )
+            asyncio.create_task(self.process_input_frames())
+            asyncio.create_task(
+                player_worker_decode(
+                    callable,
+                    self.queue,
+                    self.thread_quit,
+                    False,
+                    self.event_handler.output_sample_rate,
+                    self.event_handler.output_frame_size,
+                )
+            )
+            self.has_started = True
+
+    async def recv(self):
+        try:
+            if self.readyState != "live":
+                raise MediaStreamError
+
+            self.start()
+            frame = await self.queue.get()
+            logger.debug("frame %s", frame)
+
+            data_time = frame.time
+
+            if time.time() - self.last_timestamp > 10 * (
+                self.event_handler.output_frame_size
+                / self.event_handler.output_sample_rate
+            ):
+                self._start = None
+
+            # control playback rate
+            if self._start is None:
+                self._start = time.time() - data_time
+            else:
+                wait = self._start + data_time - time.time()
+                await asyncio.sleep(wait)
+            self.last_timestamp = time.time()
+            return frame
+        except Exception as e:
+            logger.debug("exception %s", e)
+            exec = traceback.format_exc()
+            logger.debug("traceback %s", exec)
+
+    def stop(self):
+        logger.debug("audio callback stop")
+        self.thread_quit.set()
+        super().stop()
 
 
 class ServerToClientVideo(VideoStreamTrack):

@@ -113,32 +236,18 @@ class ServerToClientVideo(VideoStreamTrack):
     ) -> None:
         super().__init__()  # don't forget this!
         self.event_handler = event_handler
+        self.args_set = asyncio.Event()
         self.latest_args: str | list[Any] = "not_set"
         self.generator: Generator[Any, None, Any] | None = None
 
-    def add_frame_to_payload(
-        self, args: list[Any], frame: np.ndarray | None
-    ) -> list[Any]:
-        new_args = []
-        for val in args:
-            if isinstance(val, str) and val == "__webrtc_value__":
-                new_args.append(frame)
-            else:
-                new_args.append(val)
-        return new_args
-
     def array_to_frame(self, array: np.ndarray) -> VideoFrame:
         return VideoFrame.from_ndarray(array, format="bgr24")
 
     async def recv(self):
         try:
             pts, time_base = await self.next_timestamp()
-            if self.latest_args == "not_set":
-                frame = self.array_to_frame(np.zeros((480, 640, 3), dtype=np.uint8))
-                frame.pts = pts
-                frame.time_base = time_base
-                return frame
-            elif self.generator is None:
+            await self.args_set.wait()
+            if self.generator is None:
                 self.generator = cast(
                     Generator[Any, None, Any], self.event_handler(*self.latest_args)
                 )

@@ -154,9 +263,9 @@ class ServerToClientVideo(VideoStreamTrack):
             next_frame.time_base = time_base
             return next_frame
         except Exception as e:
-            logger.debug(e)
+            logger.debug("exception %s", e)
             exec = traceback.format_exc()
-            logger.debug(exec)
+            logger.debug("traceback %s ", exec)
 
 
 class ServerToClientAudio(AudioStreamTrack):

@@ -169,28 +278,38 @@ class ServerToClientAudio(AudioStreamTrack):
         self.generator: Generator[Any, None, Any] | None = None
         self.event_handler = event_handler
         self.current_timestamp = 0
-        self.latest_args = "not_set"
+        self.latest_args: str | list[Any] = "not_set"
         self.args_set = threading.Event()
         self.queue = asyncio.Queue()
-        self.thread_quit = threading.Event()
-        self.__thread = None
+        self.thread_quit = asyncio.Event()
+        self.has_started = False
+        self._start: float | None = None
         super().__init__()
 
+    def next(self) -> tuple[int, np.ndarray] | None:
+        self.args_set.wait()
+        if self.generator is None:
+            self.generator = self.event_handler(*self.latest_args)
+        if self.generator is not None:
+            try:
+                frame = next(self.generator)
+                return frame
+            except StopIteration:
+                self.thread_quit.set()
+
     def start(self):
-        if self.__thread is None:
-            self.__thread = threading.Thread(
-                name="generator-runner",
-                target=player_worker_decode,
-                args=(
-                    asyncio.get_event_loop(),
-                    self.event_handler,
-                    self,
-                    self.queue,
-                    False,
-                    self.thread_quit,
-                ),
-            )
-            self.__thread.start()
+        if not self.has_started:
+            loop = asyncio.get_running_loop()
+            callable = functools.partial(loop.run_in_executor, None, self.next)
+            asyncio.create_task(
+                player_worker_decode(
+                    callable,
+                    self.queue,
+                    self.thread_quit,
+                    True,
+                )
+            )
+            self.has_started = True
 
     async def recv(self):
         try:

@@ -215,15 +334,13 @@ class ServerToClientAudio(AudioStreamTrack):
 
             return data
         except Exception as e:
-            logger.debug(e)
+            logger.debug("exception %s", e)
             exec = traceback.format_exc()
-            logger.debug(exec)
+            logger.debug("traceback %s", exec)
 
     def stop(self):
         logger.debug("audio-to-client stop callback")
         self.thread_quit.set()
-        if self.__thread is not None:
-            self.__thread.join()
-            self.__thread = None
         super().stop()

@@ -241,7 +358,7 @@ class WebRTC(Component):
     pcs: set[RTCPeerConnection] = set([])
     relay = MediaRelay()
     connections: dict[
-        str, VideoCallback | ServerToClientVideo | ServerToClientAudio
+        str, VideoCallback | ServerToClientVideo | ServerToClientAudio | AudioCallback
     ] = {}
 
     EVENTS = ["tick"]

@@ -266,6 +383,7 @@ class WebRTC(Component):
         key: int | str | None = None,
         mirror_webcam: bool = True,
         rtc_configuration: dict[str, Any] | None = None,
+        track_constraints: dict[str, Any] | None = None,
         time_limit: float | None = None,
         mode: Literal["send-receive", "receive"] = "send-receive",
         modality: Literal["video", "audio"] = "video",

@@ -300,9 +418,6 @@ class WebRTC(Component):
             streaming: when used set as an output, takes video chunks yielded from the backend and combines them into one streaming video output. Each chunk should be a video file with a .ts extension using an h.264 encoding. Mp4 files are also accepted but they will be converted to h.264 encoding.
             watermark: an image file to be included as a watermark on the video. The image is not scaled and is displayed on the bottom right of the video. Valid formats for the image are: jpeg, png.
         """
-        if modality == "audio" and mode == "send-receive":
-            raise ValueError("Audio modality is not supported in send-receive mode")
-
         self.time_limit = time_limit
         self.height = height
         self.width = width

@@ -311,7 +426,24 @@ class WebRTC(Component):
         self.rtc_configuration = rtc_configuration
         self.mode = mode
         self.modality = modality
-        self.event_handler: Callable | None = None
+        if track_constraints is None and modality == "audio":
+            track_constraints = {
+                "echoCancellation": True,
+                "noiseSuppression": {"exact": True},
+                "autoGainControl": {"exact": True},
+                "sampleRate": {"ideal": 24000},
+                "sampleSize": {"ideal": 16},
+                "channelCount": {"exact": 1},
+            }
+        if track_constraints is None and modality == "video":
+            track_constraints = {
+                "facingMode": "user",
+                "width": {"ideal": 500},
+                "height": {"ideal": 500},
+                "frameRate": {"ideal": 30},
+            }
+        self.track_constraints = track_constraints
+        self.event_handler: Callable | StreamHandler | None = None
         super().__init__(
             label=label,
             every=every,

@@ -355,10 +487,11 @@ class WebRTC(Component):
             )
         elif self.mode == "receive":
             self.connections[webrtc_id].latest_args = list(args)
+            self.connections[webrtc_id].args_set.set()  # type: ignore
 
     def stream(
         self,
-        fn: Callable[..., Any] | None = None,
+        fn: Callable[..., Any] | StreamHandler | None = None,
         inputs: Block | Sequence[Block] | set[Block] | None = None,
         outputs: Block | Sequence[Block] | set[Block] | None = None,
         js: str | None = None,

@@ -384,6 +517,15 @@ class WebRTC(Component):
         self.event_handler = fn
         self.time_limit = time_limit
 
+        if (
+            self.mode == "send-receive"
+            and self.modality == "audio"
+            and not isinstance(self.event_handler, StreamHandler)
+        ):
+            raise ValueError(
+                "In the send-receive mode for audio, the event handler must be an instance of StreamHandler."
+            )
+
         if self.mode == "send-receive":
             if cast(list[Block], inputs)[0] != self:
                 raise ValueError(

@@ -424,9 +566,9 @@ class WebRTC(Component):
                     "In the receive mode stream event, the trigger parameter must be provided"
                 )
             trigger(lambda: "start_webrtc_stream", inputs=None, outputs=self)
-            self.tick(
+            self.tick(  # type: ignore
                 self.set_output,
-                inputs=[self] + inputs,
+                inputs=[self] + list(inputs),
                 outputs=None,
                 concurrency_id=concurrency_id,
             )

@@ -439,7 +581,7 @@ class WebRTC(Component):
     @server
     async def offer(self, body):
         logger.debug("Starting to handle offer")
-        logger.debug("Offer body", body)
+        logger.debug("Offer body %s", body)
         if len(self.connections) >= cast(int, self.concurrency_limit):
             return {"status": "failed"}
 
@@ -450,7 +592,7 @@ class WebRTC(Component):
 
         @pc.on("iceconnectionstatechange")
         async def on_iceconnectionstatechange():
-            logger.debug("ICE connection state change", pc.iceConnectionState)
+            logger.debug("ICE connection state change %s", pc.iceConnectionState)
             if pc.iceConnectionState == "failed":
                 await pc.close()
                 self.connections.pop(body["webrtc_id"], None)

@@ -458,9 +600,12 @@ class WebRTC(Component):
 
         @pc.on("connectionstatechange")
         async def on_connectionstatechange():
             logger.debug("pc.connectionState %s", pc.connectionState)
             if pc.connectionState in ["failed", "closed"]:
                 await pc.close()
-                self.connections.pop(body["webrtc_id"], None)
+                connection = self.connections.pop(body["webrtc_id"], None)
+                if connection:
+                    connection.stop()
                 self.pcs.discard(pc)
             if pc.connectionState == "connected":
                 if self.time_limit is not None:

@@ -468,12 +613,19 @@ class WebRTC(Component):
 
         @pc.on("track")
         def on_track(track):
-            cb = VideoCallback(
-                self.relay.subscribe(track),
-                event_handler=cast(Callable, self.event_handler),
-            )
+            relay = MediaRelay()
+            if self.modality == "video":
+                cb = VideoCallback(
+                    relay.subscribe(track),
+                    event_handler=cast(Callable, self.event_handler),
+                )
+            elif self.modality == "audio":
+                cb = AudioCallback(
+                    relay.subscribe(track),
+                    event_handler=cast(StreamHandler, self.event_handler),
+                )
             self.connections[body["webrtc_id"]] = cb
-            logger.debug("Adding track to peer connection", cb)
+            logger.debug("Adding track to peer connection %s", cb)
             pc.addTrack(cb)
 
         if self.mode == "receive":
@@ -482,7 +634,7 @@ class WebRTC(Component):
             elif self.modality == "audio":
                 cb = ServerToClientAudio(cast(Callable, self.event_handler))
 
-            logger.debug("Adding track to peer connection", cb)
+            logger.debug("Adding track to peer connection %s", cb)
             pc.addTrack(cb)
             self.connections[body["webrtc_id"]] = cb
            cb.on("ended", lambda: self.connections.pop(body["webrtc_id"], None))

demo/app.py (327 changes)
@@ -1,72 +1,291 @@
-def detection(image, conf_threshold=0.3):
-    image = cv2.resize(image, (model.input_width, model.input_height))
-    new_image = model.detect_objects(image, conf_threshold)
-    return cv2.resize(new_image, (500, 500))
-
-css = """.my-group {max-width: 600px !important; max-height: 600 !important;}
-.my-column {display: flex !important; justify-content: center !important; align-items: center !important};"""
-
-with gr.Blocks(css=css) as demo:
-    gr.HTML(
-        """
-    <h1 style='text-align: center'>
-    YOLOv10 Webcam Stream (Powered by WebRTC ⚡️)
-    </h1>
-    """
-    )
-    gr.HTML(
-        """
-    <h3 style='text-align: center'>
-    <a href='https://arxiv.org/abs/2405.14458' target='_blank'>arXiv</a> | <a href='https://github.com/THU-MIG/yolov10' target='_blank'>github</a>
-    </h3>
-    """
-    )
-    with gr.Column(elem_classes=["my-column"]):
-        with gr.Group(elem_classes=["my-group"]):
-            image = WebRTC(label="Stream", rtc_configuration=rtc_configuration)
-            conf_threshold = gr.Slider(
-                label="Confidence Threshold",
-                minimum=0.0,
-                maximum=1.0,
-                step=0.05,
-                value=0.30,
-            )
-
-    image.stream(
-        fn=detection, inputs=[image, conf_threshold], outputs=[image], time_limit=10
-    )
-
-if __name__ == "__main__":
-    demo.launch()
+import os
+
+import gradio as gr
+
+_docs = {'WebRTC':
+    {'description': 'Stream audio/video with WebRTC',
+     'members': {'__init__':
+        {
+        'rtc_configuration': {'type': 'dict[str, Any] | None', 'default': 'None', 'description': "The configuration dictionary to pass to the RTCPeerConnection constructor. If None, the default configuration is used."},
+        'height': {'type': 'int | str | None', 'default': 'None', 'description': 'The height of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.'},
+        'width': {'type': 'int | str | None', 'default': 'None', 'description': 'The width of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.'},
+        'label': {'type': 'str | None', 'default': 'None', 'description': 'the label for this component. Appears above the component and is also used as the header if there is a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.'},
+        'show_label': {'type': 'bool | None', 'default': 'None', 'description': 'if True, will display label.'}, 'container': {'type': 'bool', 'default': 'True', 'description': 'if True, will place the component in a container - providing some extra padding around the border.'},
+        'scale': {'type': 'int | None', 'default': 'None', 'description': 'relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.'},
+        'min_width': {'type': 'int', 'default': '160', 'description': 'minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.'},
+        'interactive': {'type': 'bool | None', 'default': 'None', 'description': 'if True, will allow users to upload a video; if False, can only be used to display videos. If not provided, this is inferred based on whether the component is used as an input or output.'}, 'visible': {'type': 'bool', 'default': 'True', 'description': 'if False, component will be hidden.'},
+        'elem_id': {'type': 'str | None', 'default': 'None', 'description': 'an optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.'},
+        'elem_classes': {'type': 'list[str] | str | None', 'default': 'None', 'description': 'an optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.'},
+        'render': {'type': 'bool', 'default': 'True', 'description': 'if False, component will not be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.'},
+        'key': {'type': 'int | str | None', 'default': 'None', 'description': 'if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.'},
+        'mirror_webcam': {'type': 'bool', 'default': 'True', 'description': 'if True webcam will be mirrored. Default is True.'},
+        },
+    'events': {'tick': {'type': None, 'default': None, 'description': ''}}}, '__meta__': {'additional_interfaces': {}, 'user_fn_refs': {'WebRTC': []}}}
+}
+
+abs_path = os.path.join(os.path.dirname(__file__), "css.css")
+
+with gr.Blocks(
+    css_paths=abs_path,
+    theme=gr.themes.Default(
+        font_mono=[
+            gr.themes.GoogleFont("Inconsolata"),
+            "monospace",
+        ],
+    ),
+) as demo:
+    gr.Markdown(
+        """
+<h1 style='text-align: center; margin-bottom: 1rem'> Gradio WebRTC ⚡️ </h1>
+
+<div style="display: flex; flex-direction: row; justify-content: center">
+<img style="display: block; padding-right: 5px; height: 20px;" alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.6%20-%20orange">
+<a href="https://github.com/freddyaboulton/gradio-webrtc" target="_blank"><img alt="Static Badge" src="https://img.shields.io/badge/github-white?logo=github&logoColor=black"></a>
+</div>
+""", elem_classes=["md-custom"], header_links=True)
+    gr.Markdown(
+        """
+## Installation
+
+```bash
+pip install gradio_webrtc
+```
+
+## Examples:
+1. [Object Detection from Webcam with YOLOv10](https://huggingface.co/spaces/freddyaboulton/webrtc-yolov10n) 📷
+2. [Streaming Object Detection from Video with RT-DETR](https://huggingface.co/spaces/freddyaboulton/rt-detr-object-detection-webrtc) 🎥
+3. [Text-to-Speech](https://huggingface.co/spaces/freddyaboulton/parler-tts-streaming-webrtc) 🗣️
+4. [Conversational AI](https://huggingface.co/spaces/freddyaboulton/omni-mini-webrtc) 🤖🗣️
+
+## Usage
+
+The WebRTC component supports the following four use cases:
+1. [Streaming video from the user webcam to the server and back](#h-streaming-video-from-the-user-webcam-to-the-server-and-back)
+2. [Streaming Video from the server to the client](#h-streaming-video-from-the-server-to-the-client)
+3. [Streaming Audio from the server to the client](#h-streaming-audio-from-the-server-to-the-client)
+4. [Streaming Audio from the client to the server and back (conversational AI)](#h-conversational-ai)
+
+
+## Streaming Video from the User Webcam to the Server and Back
+
+```python
+import gradio as gr
+import cv2
+from huggingface_hub import hf_hub_download
+from gradio_webrtc import WebRTC
+
+
+def detection(image, conf_threshold=0.3):
+    ... your detection code here ...
+
+
+with gr.Blocks() as demo:
+    image = WebRTC(label="Stream", mode="send-receive", modality="video")
+    conf_threshold = gr.Slider(
+        label="Confidence Threshold",
+        minimum=0.0,
+        maximum=1.0,
+        step=0.05,
+        value=0.30,
+    )
+    image.stream(
+        fn=detection,
+        inputs=[image, conf_threshold],
+        outputs=[image], time_limit=10
+    )
+
+if __name__ == "__main__":
+    demo.launch()
+
+```
+* Set the `mode` parameter to `send-receive` and `modality` to "video".
+* The `stream` event's `fn` parameter is a function that receives the next frame from the webcam
+as a **numpy array** and returns the processed frame also as a **numpy array**.
+* Numpy arrays are in (height, width, 3) format where the color channels are in RGB format.
+* The `inputs` parameter should be a list where the first element is the WebRTC component. The only output allowed is the WebRTC component.
+* The `time_limit` parameter is the maximum time in seconds the video stream will run. If the time limit is reached, the video stream will stop.
+
+## Streaming Video from the server to the client
+
+```python
+import gradio as gr
+from gradio_webrtc import WebRTC
+import cv2
+
+def generation():
+    url = "https://download.tsi.telecom-paristech.fr/gpac/dataset/dash/uhd/mux_sources/hevcds_720p30_2M.mp4"
+    cap = cv2.VideoCapture(url)
+    iterating = True
+    while iterating:
+        iterating, frame = cap.read()
+        yield frame
+
+with gr.Blocks() as demo:
+    output_video = WebRTC(label="Video Stream", mode="receive", modality="video")
+    button = gr.Button("Start", variant="primary")
+    output_video.stream(
+        fn=generation, inputs=None, outputs=[output_video],
+        trigger=button.click
+    )
+
+if __name__ == "__main__":
+    demo.launch()
+```
+
+* Set the "mode" parameter to "receive" and "modality" to "video".
+* The `stream` event's `fn` parameter is a generator function that yields the next frame from the video as a **numpy array**.
+* The only output allowed is the WebRTC component.
+* The `trigger` parameter is the Gradio event that will trigger the WebRTC connection. In this case, the button click event.
+
+## Streaming Audio from the Server to the Client
+
+```python
+import gradio as gr
+from pydub import AudioSegment
+
+def generation(num_steps):
+    for _ in range(num_steps):
+        segment = AudioSegment.from_file("/Users/freddy/sources/gradio/demo/audio_debugger/cantina.wav")
+        yield (segment.frame_rate, np.array(segment.get_array_of_samples()).reshape(1, -1))
+
+with gr.Blocks() as demo:
+    audio = WebRTC(label="Stream", mode="receive", modality="audio")
+    num_steps = gr.Slider(
+        label="Number of Steps",
+        minimum=1,
+        maximum=10,
+        step=1,
+        value=5,
+    )
+    button = gr.Button("Generate")
+
+    audio.stream(
+        fn=generation, inputs=[num_steps], outputs=[audio],
+        trigger=button.click
+    )
+```
+
+* Set the "mode" parameter to "receive" and "modality" to "audio".
+* The `stream` event's `fn` parameter is a generator function that yields the next audio segment as a tuple of (frame_rate, audio_samples).
+* The numpy array should be of shape (1, num_samples).
+* The `outputs` parameter should be a list with the WebRTC component as the only element.
+
+## Conversational AI
+
+```python
+import gradio as gr
+import numpy as np
+from gradio_webrtc import WebRTC, StreamHandler
+from queue import Queue
+import time
+
+
+class EchoHandler(StreamHandler):
+    def __init__(self) -> None:
+        super().__init__()
+        self.queue = Queue()
+
+    def receive(self, frame: tuple[int, np.ndarray] | np.ndarray) -> None:
+        self.queue.put(frame)
+
+    def emit(self) -> None:
+        return self.queue.get()
+
+
+with gr.Blocks() as demo:
+    with gr.Column():
+        with gr.Group():
+            audio = WebRTC(
+                label="Stream",
+                rtc_configuration=None,
+                mode="send-receive",
+                modality="audio",
+            )
+
+        audio.stream(fn=EchoHandler(), inputs=[audio], outputs=[audio], time_limit=15)
+
+
+if __name__ == "__main__":
+    demo.launch()
+```
+
+* Instead of passing a function to the `stream` event's `fn` parameter, pass a `StreamHandler` implementation. The `StreamHandler` above simply echoes the audio back to the client.
+* The `StreamHandler` class has two methods: `receive` and `emit`. The `receive` method is called when a new frame is received from the client, and the `emit` method returns the next frame to send to the client.
+* An audio frame is represented as a tuple of (frame_rate, audio_samples) where `audio_samples` is a numpy array of shape (num_channels, num_samples).
+* You can also specify the audio layout ("mono" or "stereo") in the `emit` method by returning it as the third element of the tuple. If not specified, the default is "mono".
+* The `time_limit` parameter is the maximum time in seconds the conversation will run. If the time limit is reached, the audio stream will stop.
+* The `emit` method SHOULD NOT block. If a frame is not ready to be sent, the method should return None.
+
+## Deployment
+
+When deploying in a cloud environment (like Hugging Face Spaces, EC2, etc.), you need to set up a TURN server to relay the WebRTC traffic.
+The easiest way to do this is to use a service like Twilio.
+
+```python
+from twilio.rest import Client
+import os
+from inference import YOLOv10
+
+model_file = hf_hub_download(
+    repo_id="onnx-community/yolov10n", filename="onnx/model.onnx"
+)
+
+model = YOLOv10(model_file)
+
+account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
+auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
+
+if account_sid and auth_token:
+    client = Client(account_sid, auth_token)
+
+    token = client.tokens.create()
+
+    rtc_configuration = {
+        "iceServers": token.ice_servers,
+        "iceTransportPolicy": "relay",
+    }
+else:
+    rtc_configuration = None
+
+with gr.Blocks() as demo:
+    ...
+    rtc = WebRTC(rtc_configuration=rtc_configuration, ...)
+    ...
+```
+""", elem_classes=["md-custom"], header_links=True)
+
+    gr.Markdown("""
+##
+""", elem_classes=["md-custom"], header_links=True)
+
+    gr.ParamViewer(value=_docs["WebRTC"]["members"]["__init__"], linkify=[])
+
+    demo.load(None, js=r"""function() {
+    const refs = {};
+    const user_fn_refs = {
+          WebRTC: [], };
+    requestAnimationFrame(() => {
+
+        Object.entries(user_fn_refs).forEach(([key, refs]) => {
+            if (refs.length > 0) {
+                const el = document.querySelector(`.${key}-user-fn`);
+                if (!el) return;
+                refs.forEach(ref => {
+                    el.innerHTML = el.innerHTML.replace(
+                        new RegExp("\\b"+ref+"\\b", "g"),
+                        `<a href="#h-${ref.toLowerCase()}">${ref}</a>`
+                    );
+                })
+            }
+        })
+
+        Object.entries(refs).forEach(([key, refs]) => {
+            if (refs.length > 0) {
+                const el = document.querySelector(`.${key}`);
+                if (!el) return;
+                refs.forEach(ref => {
+                    el.innerHTML = el.innerHTML.replace(
+                        new RegExp("\\b"+ref+"\\b", "g"),
+                        `<a href="#h-${ref.toLowerCase()}">${ref}</a>`
+                    );
+                })
+            }
+        })
+    })
+}
+
+""")
+
+demo.launch()

@@ -1,10 +1,11 @@
-import gradio as gr
-import cv2
-from huggingface_hub import hf_hub_download
-from gradio_webrtc import WebRTC
-from twilio.rest import Client
-import os
+import os
+
+import cv2
+import gradio as gr
+from gradio_webrtc import WebRTC
+from huggingface_hub import hf_hub_download
+from inference import YOLOv10
+from twilio.rest import Client
 
 model_file = hf_hub_download(
     repo_id="onnx-community/yolov10n", filename="onnx/model.onnx"

@@ -1,11 +1,10 @@
-import gradio as gr
-import numpy as np
-from gradio_webrtc import WebRTC
-from twilio.rest import Client
-import os
-from pydub import AudioSegment
+import os
+
+import gradio as gr
+import numpy as np
+from gradio_webrtc import WebRTC
+from pydub import AudioSegment
+from twilio.rest import Client
 
 account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
 auth_token = os.environ.get("TWILIO_AUTH_TOKEN")

@@ -24,12 +23,16 @@ else:
 
 import time
 
 
 def generation(num_steps):
     for _ in range(num_steps):
-        segment = AudioSegment.from_file("/Users/freddy/sources/gradio/demo/audio_debugger/cantina.wav")
-        yield (segment.frame_rate, np.array(segment.get_array_of_samples()).reshape(1, -1))
+        segment = AudioSegment.from_file(
+            "/Users/freddy/sources/gradio/demo/audio_debugger/cantina.wav"
+        )
+        yield (
+            segment.frame_rate,
+            np.array(segment.get_array_of_samples()).reshape(1, -1),
+        )
         time.sleep(3.5)
 
 
 css = """.my-group {max-width: 600px !important; max-height: 600px !important;}
 .my-column {display: flex !important; justify-content: center !important; align-items: center !important};"""

@@ -45,8 +48,12 @@ with gr.Blocks() as demo:
     )
     with gr.Column(elem_classes=["my-column"]):
         with gr.Group(elem_classes=["my-group"]):
-            audio = WebRTC(label="Stream", rtc_configuration=rtc_configuration,
-                           mode="receive", modality="audio")
+            audio = WebRTC(
+                label="Stream",
+                rtc_configuration=rtc_configuration,
+                mode="receive",
+                modality="audio",
+            )
             num_steps = gr.Slider(
                 label="Number of Steps",
                 minimum=1,
@@ -57,8 +64,7 @@ with gr.Blocks() as demo:
     button = gr.Button("Generate")
 
     audio.stream(
-        fn=generation, inputs=[num_steps], outputs=[audio],
-        trigger=button.click
+        fn=generation, inputs=[num_steps], outputs=[audio], trigger=button.click
     )
 
 
@@ -1,11 +1,10 @@
-import gradio as gr
-import numpy as np
-from gradio_webrtc import WebRTC
-from twilio.rest import Client
-import os
-from pydub import AudioSegment
+import os
+
+import gradio as gr
+import numpy as np
+from gradio_webrtc import WebRTC
+from pydub import AudioSegment
+from twilio.rest import Client
 
 account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
 auth_token = os.environ.get("TWILIO_AUTH_TOKEN")

@@ -24,10 +23,16 @@ else:
 
 import time
 
 
 def generation(num_steps):
     for _ in range(num_steps):
-        segment = AudioSegment.from_file("/Users/freddy/sources/gradio/demo/audio_debugger/cantina.wav")
-        yield (segment.frame_rate, np.array(segment.get_array_of_samples()).reshape(1, -1))
+        segment = AudioSegment.from_file(
+            "/Users/freddy/sources/gradio/demo/audio_debugger/cantina.wav"
+        )
+        yield (
+            segment.frame_rate,
+            np.array(segment.get_array_of_samples()).reshape(1, -1),
+        )
+        time.sleep(3.5)

@@ -48,9 +53,12 @@ with gr.Blocks() as demo:
         gr.Slider()
     with gr.Column():
         # audio = gr.Audio(interactive=False)
-        audio = WebRTC(label="Stream", rtc_configuration=rtc_configuration,
-                       mode="receive", modality="audio")
+        audio = WebRTC(
+            label="Stream",
+            rtc_configuration=rtc_configuration,
+            mode="receive",
+            modality="audio",
+        )
 
 
 if __name__ == "__main__":

demo/echo_conversation.py (new file, 61 lines)
@@ -0,0 +1,61 @@
+import logging
+
+# Configure the root logger to WARNING to suppress debug messages from other libraries
+logging.basicConfig(level=logging.WARNING)
+
+# Create a console handler
+console_handler = logging.StreamHandler()
+console_handler.setLevel(logging.DEBUG)
+
+# Create a formatter
+formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
+console_handler.setFormatter(formatter)
+
+# Configure the logger for your specific library
+logger = logging.getLogger("gradio_webrtc")
+logger.setLevel(logging.DEBUG)
+logger.addHandler(console_handler)
+
+
+import time
+from queue import Queue
+
+import gradio as gr
+import numpy as np
+from gradio_webrtc import StreamHandler, WebRTC
+
+
+class EchoHandler(StreamHandler):
+    def __init__(self) -> None:
+        super().__init__()
+        self.queue = Queue()
+
+    def receive(self, frame: tuple[int, np.ndarray] | np.ndarray) -> None:
+        self.queue.put(frame)
+
+    def emit(self) -> None:
+        return self.queue.get()
+
+
+with gr.Blocks() as demo:
+    gr.HTML(
+        """
+    <h1 style='text-align: center'>
+    Conversational AI (Powered by WebRTC ⚡️)
+    </h1>
+    """
+    )
+    with gr.Column():
+        with gr.Group():
+            audio = WebRTC(
+                label="Stream",
+                rtc_configuration=None,
+                mode="send-receive",
+                modality="audio",
+            )
+
+        audio.stream(fn=EchoHandler(), inputs=[audio], outputs=[audio], time_limit=15)
+
+
+if __name__ == "__main__":
+    demo.launch()

@@ -1,8 +1,8 @@
 import time
 
 import cv2
 import numpy as np
 import onnxruntime
 
 from utils import draw_detections
 
@@ -120,8 +120,9 @@ class YOLOv10:
 
 
 if __name__ == "__main__":
-    import requests
     import tempfile
+
+    import requests
     from huggingface_hub import hf_hub_download
 
     model_file = hf_hub_download(

demo/space.py (139 changes)
@@ -1,26 +1,92 @@
 import gradio as gr
-import os
-
-_docs = {'WebRTC':
-    {'description': 'Stream audio/video with WebRTC',
-     'members': {'__init__':
-        {
-        'rtc_configuration': {'type': 'dict[str, Any] | None', 'default': 'None', 'description': "The configuration dictionary to pass to the RTCPeerConnection constructor. If None, the default configuration is used."},
-        'height': {'type': 'int | str | None', 'default': 'None', 'description': 'The height of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.'},
-        'width': {'type': 'int | str | None', 'default': 'None', 'description': 'The width of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.'},
-        'label': {'type': 'str | None', 'default': 'None', 'description': 'the label for this component. Appears above the component and is also used as the header if there is a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.'},
-        'show_label': {'type': 'bool | None', 'default': 'None', 'description': 'if True, will display label.'}, 'container': {'type': 'bool', 'default': 'True', 'description': 'if True, will place the component in a container - providing some extra padding around the border.'},
-        'scale': {'type': 'int | None', 'default': 'None', 'description': 'relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.'},
-        'min_width': {'type': 'int', 'default': '160', 'description': 'minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.'},
-        'interactive': {'type': 'bool | None', 'default': 'None', 'description': 'if True, will allow users to upload a video; if False, can only be used to display videos. If not provided, this is inferred based on whether the component is used as an input or output.'}, 'visible': {'type': 'bool', 'default': 'True', 'description': 'if False, component will be hidden.'},
-        'elem_id': {'type': 'str | None', 'default': 'None', 'description': 'an optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.'},
-        'elem_classes': {'type': 'list[str] | str | None', 'default': 'None', 'description': 'an optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.'},
-        'render': {'type': 'bool', 'default': 'True', 'description': 'if False, component will not be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.'},
-        'key': {'type': 'int | str | None', 'default': 'None', 'description': 'if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.'},
-        'mirror_webcam': {'type': 'bool', 'default': 'True', 'description': 'if True webcam will be mirrored. Default is True.'},
-        },
-    'events': {'tick': {'type': None, 'default': None, 'description': ''}}}, '__meta__': {'additional_interfaces': {}, 'user_fn_refs': {'WebRTC': []}}}
-}
+
+_docs = {
+    "WebRTC": {
+        "description": "Stream audio/video with WebRTC",
+        "members": {
+            "__init__": {
+                "rtc_configuration": {
+                    "type": "dict[str, Any] | None",
+                    "default": "None",
+                    "description": "The configuration dictionary to pass to the RTCPeerConnection constructor. If None, the default configuration is used.",
+                },
+                "height": {
+                    "type": "int | str | None",
+                    "default": "None",
+                    "description": "The height of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.",
+                },
+                "width": {
+                    "type": "int | str | None",
+                    "default": "None",
+                    "description": "The width of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed video file, but will affect the displayed video.",
+                },
+                "label": {
+                    "type": "str | None",
+                    "default": "None",
+                    "description": "the label for this component. Appears above the component and is also used as the header if there is a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.",
+                },
+                "show_label": {
+                    "type": "bool | None",
+                    "default": "None",
+                    "description": "if True, will display label.",
+                },
+                "container": {
+                    "type": "bool",
+                    "default": "True",
+                    "description": "if True, will place the component in a container - providing some extra padding around the border.",
+                },
+                "scale": {
+                    "type": "int | None",
+                    "default": "None",
+                    "description": "relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.",
+                },
+                "min_width": {
+                    "type": "int",
+                    "default": "160",
+                    "description": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.",
+                },
+                "interactive": {
+                    "type": "bool | None",
+                    "default": "None",
+                    "description": "if True, will allow users to upload a video; if False, can only be used to display videos. If not provided, this is inferred based on whether the component is used as an input or output.",
+                },
+                "visible": {
+                    "type": "bool",
+                    "default": "True",
+                    "description": "if False, component will be hidden.",
+                },
+                "elem_id": {
+                    "type": "str | None",
+                    "default": "None",
+                    "description": "an optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.",
+                },
+                "elem_classes": {
+                    "type": "list[str] | str | None",
+                    "default": "None",
+                    "description": "an optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.",
+                },
+                "render": {
+                    "type": "bool",
+                    "default": "True",
+                    "description": "if False, component will not be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.",
+                },
+                "key": {
+                    "type": "int | str | None",
+                    "default": "None",
+                    "description": "if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.",
+                },
+                "mirror_webcam": {
+                    "type": "bool",
+                    "default": "True",
+                    "description": "if True webcam will be mirrored. Default is True.",
+                },
+            },
+            "events": {"tick": {"type": None, "default": None, "description": ""}},
+        },
+        "__meta__": {"additional_interfaces": {}, "user_fn_refs": {"WebRTC": []}},
+    }
+}

@@ -36,16 +102,19 @@ with gr.Blocks(
     ),
 ) as demo:
     gr.Markdown(
         """
 <h1 style='text-align: center; margin-bottom: 1rem'> Gradio WebRTC ⚡️ </h1>
 
 <div style="display: flex; flex-direction: row; justify-content: center">
 <img style="display: block; padding-right: 5px; height: 20px;" alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.5%20-%20orange">
 <a href="https://github.com/freddyaboulton/gradio-webrtc" target="_blank"><img alt="Static Badge" src="https://img.shields.io/badge/github-white?logo=github&logoColor=black"></a>
 </div>
-    """, elem_classes=["md-custom"], header_links=True)
+    """,
+        elem_classes=["md-custom"],
+        header_links=True,
+    )
     gr.Markdown(
         """
 ## Installation
 
 ```bash

@@ -195,17 +264,24 @@ with gr.Blocks() as demo:
     rtc = WebRTC(rtc_configuration=rtc_configuration, ...)
     ...
 ```
-    """, elem_classes=["md-custom"], header_links=True)
+    """,
+        elem_classes=["md-custom"],
+        header_links=True,
+    )
 
-    gr.Markdown("""
+    gr.Markdown(
+        """
 ##
-    """, elem_classes=["md-custom"], header_links=True)
+    """,
+        elem_classes=["md-custom"],
+        header_links=True,
+    )
 
     gr.ParamViewer(value=_docs["WebRTC"]["members"]["__init__"], linkify=[])
 
-    demo.load(None, js=r"""function() {
+    demo.load(
+        None,
+        js=r"""function() {
     const refs = {};
     const user_fn_refs = {
           WebRTC: [], };

@@ -239,6 +315,7 @@ with gr.Blocks() as demo:
     })
 }
 
-""")
+""",
+    )
 
 demo.launch()
@@ -1,5 +1,5 @@
import numpy as np
import cv2
import numpy as np

class_names = [
"person",
@@ -1,9 +1,9 @@
import os

import cv2
import gradio as gr
from gradio_webrtc import WebRTC
from twilio.rest import Client
import os
import cv2


account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
@@ -24,7 +24,6 @@ else:
def generation(input_video):
cap = cv2.VideoCapture(input_video)

iterating = True

while iterating:
@@ -35,6 +34,7 @@ def generation(input_video):
display_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
yield display_frame


with gr.Blocks() as demo:
gr.HTML(
"""
@@ -47,11 +47,17 @@ with gr.Blocks() as demo:
with gr.Column():
input_video = gr.Video(sources="upload")
with gr.Column():
output_video = WebRTC(label="Video Stream", rtc_configuration=rtc_configuration,
mode="receive", modality="video")
output_video = WebRTC(
label="Video Stream",
rtc_configuration=rtc_configuration,
mode="receive",
modality="video",
)
output_video.stream(
fn=generation, inputs=[input_video], outputs=[output_video],
trigger=input_video.upload
fn=generation,
inputs=[input_video],
outputs=[output_video],
trigger=input_video.upload,
)
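The demo above (and the near-identical one that follows) cuts off right after the `auth_token` line. The usual Twilio pattern these demos rely on looks roughly like this; a sketch of the common approach, not the exact elided code:

```python
import os

from twilio.rest import Client

account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")

if account_sid and auth_token:
    client = Client(account_sid, auth_token)
    # Network Traversal Service token carrying short-lived TURN/STUN credentials
    token = client.tokens.create()
    rtc_configuration = {
        "iceServers": token.ice_servers,
        "iceTransportPolicy": "relay",
    }
else:
    # Without credentials, fall back to default/local connectivity
    rtc_configuration = None
```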
@@ -1,9 +1,9 @@
import os

import cv2
import gradio as gr
from gradio_webrtc import WebRTC
from twilio.rest import Client
import os
import cv2


account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
@@ -30,7 +30,6 @@ def generation():
yield frame



with gr.Blocks() as demo:
gr.HTML(
"""
@@ -39,12 +38,15 @@ with gr.Blocks() as demo:
</h1>
"""
)
output_video = WebRTC(label="Video Stream", rtc_configuration=rtc_configuration,
mode="receive", modality="video")
output_video = WebRTC(
label="Video Stream",
rtc_configuration=rtc_configuration,
mode="receive",
modality="video",
)
button = gr.Button("Start", variant="primary")
output_video.stream(
fn=generation, inputs=None, outputs=[output_video],
trigger=button.click
fn=generation, inputs=None, outputs=[output_video], trigger=button.click
)
@@ -7,6 +7,7 @@
import type { LoadingStatus } from "@gradio/statustracker";
import StaticVideo from "./shared/StaticVideo.svelte";
import StaticAudio from "./shared/StaticAudio.svelte";
import InteractiveAudio from "./shared/InteractiveAudio.svelte";

export let elem_id = "";
export let elem_classes: string[] = [];
@@ -31,14 +32,14 @@
export let time_limit: number | null = null;
export let modality: "video" | "audio" = "video";
export let mode: "send-receive" | "receive" = "send-receive";
export let track_constraints: MediaTrackConstraints = {};

let dragging = false;

$: console.log("value", value);
</script>

{#if mode == "receive" && modality === "video"}
<Block
<Block
{visible}
variant={"solid"}
border_mode={dragging ? "focus" : "base"}
@@ -59,6 +60,7 @@
on:clear_status={() => gradio.dispatch("clear_status", loading_status)}
/>

{#if mode == "receive" && modality === "video"}
<StaticVideo
bind:value={value}
{label}
@@ -68,27 +70,7 @@
on:tick={() => gradio.dispatch("tick")}
on:error={({ detail }) => gradio.dispatch("error", detail)}
/>
</Block>

{:else if mode == "receive" && modality === "audio"}
<Block
variant={"solid"}
border_mode={dragging ? "focus" : "base"}
padding={false}
allow_overflow={false}
{elem_id}
{elem_classes}
{visible}
{container}
{scale}
{min_width}
>
<StatusTracker
autoscroll={gradio.autoscroll}
i18n={gradio.i18n}
{...loading_status}
on:clear_status={() => gradio.dispatch("clear_status", loading_status)}
/>
{:else if mode == "receive" && modality === "audio"}
<StaticAudio
bind:value={value}
{label}
@@ -99,28 +81,7 @@
on:tick={() => gradio.dispatch("tick")}
on:error={({ detail }) => gradio.dispatch("error", detail)}
/>
</Block>
{:else if mode === "send-receive" && modality === "video"}
<Block
{visible}
variant={"solid"}
border_mode={dragging ? "focus" : "base"}
padding={false}
{elem_id}
{elem_classes}
{height}
{width}
{container}
{scale}
{min_width}
allow_overflow={false}
>
<StatusTracker
autoscroll={gradio.autoscroll}
i18n={gradio.i18n}
{...loading_status}
on:clear_status={() => gradio.dispatch("clear_status", loading_status)}
/>
{:else if mode === "send-receive" && modality === "video"}
<Video
bind:value={value}
{label}
@@ -145,5 +106,18 @@
>
<UploadText i18n={gradio.i18n} type="video" />
</Video>
</Block>
{/if}
{:else if mode === "send-receive" && modality === "audio"}
<InteractiveAudio
bind:value={value}
{label}
{show_label}
{server}
{rtc_configuration}
{time_limit}
{track_constraints}
i18n={gradio.i18n}
on:tick={() => gradio.dispatch("tick")}
on:error={({ detail }) => gradio.dispatch("error", detail)}
/>
{/if}
</Block>
frontend/package-lock.json (generated, 812 lines changed): diff suppressed because it is too large.
@@ -9,15 +9,15 @@
"dependencies": {
"@ffmpeg/ffmpeg": "^0.12.10",
"@ffmpeg/util": "^0.12.1",
"@gradio/atoms": "0.9.0",
"@gradio/client": "1.6.0",
"@gradio/atoms": "0.9.2",
"@gradio/client": "1.7.0",
"@gradio/icons": "0.8.0",
"@gradio/image": "0.16.0",
"@gradio/markdown": "^0.10.0",
"@gradio/statustracker": "0.8.0",
"@gradio/upload": "0.13.0",
"@gradio/image": "0.16.4",
"@gradio/markdown": "^0.10.3",
"@gradio/statustracker": "0.9.1",
"@gradio/upload": "0.13.3",
"@gradio/utils": "0.7.0",
"@gradio/wasm": "0.14.0",
"@gradio/wasm": "0.14.2",
"hls.js": "^1.5.16",
"mrmime": "^2.0.0"
},
@@ -2,7 +2,7 @@
import { onMount, onDestroy } from 'svelte';

export let numBars = 16;
export let stream_state: "open" | "closed" = "closed";
export let stream_state: "open" | "closed" | "waiting" = "closed";
export let audio_source: HTMLAudioElement;

let audioContext: AudioContext;
@@ -25,7 +25,6 @@
});

function setupAudioContext() {
console.log("set up")
audioContext = new (window.AudioContext || window.webkitAudioContext)();
analyser = audioContext.createAnalyser();
console.log("audio_source", audio_source.srcObject);
@@ -50,16 +49,6 @@

animationId = requestAnimationFrame(updateBars);
}

function toggleMute() {
if (audio_source && audio_source.srcObject) {
const audioTracks = (audio_source.srcObject as MediaStream).getAudioTracks();
audioTracks.forEach(track => {
track.enabled = !track.enabled;
});
is_muted = !audioTracks[0].enabled;
}
}

</script>
@@ -69,17 +58,14 @@
{#each Array(numBars) as _}
<div class="box"></div>
{/each}
</div>
<button class="muteButton" on:click={toggleMute}>
{is_muted ? '🔈' : '🔊'}
</div>
</div>

<style>
.waveContainer {
position: relative;
display: flex;
flex-direction: column;
display: flex;
min-height: 100px;
max-height: 128px;
}

@@ -98,15 +84,4 @@
border-radius: 8px;
transition: transform 0.05s ease;
}

.muteButton {
margin-top: 10px;
padding: 10px 20px;
font-size: 24px;
cursor: pointer;
background: none;
border: none;
border-radius: 5px;
color: var(--color-accent);
}
frontend/shared/InteractiveAudio.svelte (new file, 239 lines)
@@ -0,0 +1,239 @@
<script lang="ts">
	import {
		BlockLabel,
	} from "@gradio/atoms";
	import type { I18nFormatter } from "@gradio/utils";
	import { createEventDispatcher } from "svelte";
	import { onMount } from "svelte";
	import { StreamingBar } from "@gradio/statustracker";
	import {
		Circle,
		Square,
		Spinner,
		Music
	} from "@gradio/icons";

	import { start, stop } from "./webrtc_utils";
	import AudioWave from "./AudioWave.svelte";

	export let value: string | null = null;
	export let label: string | undefined = undefined;
	export let show_label = true;
	export let rtc_configuration: Object | null = null;
	export let i18n: I18nFormatter;
	export let time_limit: number | null = null;
	export let track_constraints: MediaTrackConstraints = {};
	let _time_limit: number | null = null;

	$: console.log("time_limit", time_limit);

	export let server: {
		offer: (body: any) => Promise<any>;
	};

	let stream_state: "open" | "closed" | "waiting" = "closed";
	let audio_player: HTMLAudioElement;
	let pc: RTCPeerConnection;
	let _webrtc_id = null;

	const dispatch = createEventDispatcher<{
		tick: undefined;
		error: string;
		play: undefined;
		stop: undefined;
	}>();

	onMount(() => {
		window.setInterval(() => {
			if (stream_state == "open") {
				dispatch("tick");
			}
		}, 1000);
	});

	async function start_stream(): Promise<void> {
		if (stream_state === "open") {
			stop(pc);
			stream_state = "closed";
			_time_limit = null;
			return;
		}
		_webrtc_id = Math.random().toString(36).substring(2);
		value = _webrtc_id;
		pc = new RTCPeerConnection(rtc_configuration);
		pc.addEventListener("connectionstatechange",
			async (event) => {
				switch (pc.connectionState) {
					case "connected":
						console.info("connected");
						stream_state = "open";
						_time_limit = time_limit;
						break;
					case "disconnected":
						console.info("closed");
						stream_state = "closed";
						_time_limit = null;
						stop(pc);
						break;
					default:
						break;
				}
			}
		);
		stream_state = "waiting";
		let stream = null;

		try {
			stream = await navigator.mediaDevices.getUserMedia({ audio: track_constraints });
		} catch (err) {
			if (!navigator.mediaDevices) {
				dispatch("error", i18n("audio.no_device_support"));
				return;
			}
			if (err instanceof DOMException && err.name == "NotAllowedError") {
				dispatch("error", i18n("audio.allow_recording_access"));
				return;
			}
			throw err;
		}
		if (stream == null) return;

		start(stream, pc, audio_player, server.offer, _webrtc_id, "audio").then((connection) => {
			pc = connection;
		}).catch(() => {
			console.info("catching");
			dispatch("error", "Too many concurrent users. Come back later!");
		});
	}
</script>

<BlockLabel
	{show_label}
	Icon={Music}
	float={false}
	label={label || i18n("audio.audio")}
/>
<div class="audio-container">
	<audio
		class="standard-player"
		class:hidden={value === "__webrtc_value__"}
		on:load
		bind:this={audio_player}
		on:ended={() => dispatch("stop")}
		on:play={() => dispatch("play")}
	/>
	<AudioWave audio_source={audio_player} {stream_state} />
	<StreamingBar time_limit={_time_limit} />
	<div class="button-wrap">
		<button
			on:click={start_stream}
			aria-label={"start stream"}
		>
			{#if stream_state === "waiting"}
				<div class="icon-with-text" style="width:var(--size-24);">
					<div class="icon color-primary" title="spinner">
						<Spinner />
					</div>
					{i18n("audio.waiting")}
				</div>
			{:else if stream_state === "open"}
				<div class="icon-with-text">
					<div class="icon color-primary" title="stop recording">
						<Square />
					</div>
					{i18n("audio.stop")}
				</div>
			{:else}
				<div class="icon-with-text">
					<div class="icon color-primary" title="start recording">
						<Circle />
					</div>
					{i18n("audio.record")}
				</div>
			{/if}
		</button>
	</div>
</div>

<style>
	.audio-container {
		display: flex;
		height: 100%;
		flex-direction: column;
		justify-content: center;
		align-items: center;
	}

	:global(::part(wrapper)) {
		margin-bottom: var(--size-2);
	}

	.standard-player {
		width: 100%;
		padding: var(--size-2);
	}

	.hidden {
		display: none;
	}

	.button-wrap {
		margin-top: var(--size-2);
		margin-bottom: var(--size-2);
		background-color: var(--block-background-fill);
		border: 1px solid var(--border-color-primary);
		border-radius: var(--radius-xl);
		padding: var(--size-1-5);
		display: flex;
		bottom: var(--size-2);
		box-shadow: var(--shadow-drop-lg);
		line-height: var(--size-3);
		color: var(--button-secondary-text-color);
	}

	.icon-with-text {
		width: var(--size-20);
		align-items: center;
		margin: 0 var(--spacing-xl);
		display: flex;
		justify-content: space-evenly;
	}

	@media (--screen-md) {
		button {
			bottom: var(--size-4);
		}
	}

	@media (--screen-xl) {
		button {
			bottom: var(--size-8);
		}
	}

	.icon {
		width: 18px;
		height: 18px;
		display: flex;
		justify-content: space-between;
		align-items: center;
	}

	.color-primary {
		fill: var(--primary-600);
		stroke: var(--primary-600);
		color: var(--primary-600);
	}
</style>
@@ -20,6 +20,7 @@
offer: (body: any) => Promise<any>;
};
export let rtc_configuration: Object;
export let track_constraints: MediaTrackConstraints = {};

const dispatch = createEventDispatcher<{
change: FileData | null;
@@ -48,6 +49,7 @@
{rtc_configuration}
{include_audio}
{time_limit}
{track_constraints}
on:error
on:start_recording
on:stop_recording
@@ -62,22 +64,6 @@
</div>

<style>
.file-name {
padding: var(--size-6);
font-size: var(--text-xxl);
word-break: break-all;
}

.file-size {
padding: var(--size-2);
font-size: var(--text-xl);
}

.upload-container {
height: 100%;
width: 100%;
}

.video-container {
display: flex;
height: 100%;
@@ -17,13 +17,12 @@
export let show_label = true;
export let rtc_configuration: Object | null = null;
export let i18n: I18nFormatter;
export let autoplay: boolean = true;

export let server: {
offer: (body: any) => Promise<any>;
};

let stream_state = "closed";
let stream_state: "open" | "closed" | "waiting" = "closed";
let audio_player: HTMLAudioElement;
let pc: RTCPeerConnection;
let _webrtc_id = Math.random().toString(36).substring(2);
@@ -36,7 +35,6 @@
stop: undefined;
}>();

onMount(() => {
window.setInterval(() => {
if (stream_state == "open") {
@@ -46,33 +44,43 @@
}
)

$: if( value === "start_webrtc_stream") {
stream_state = "connecting";
value = _webrtc_id;
pc = new RTCPeerConnection(rtc_configuration);
pc.addEventListener("connectionstatechange",
async (event) => {
switch(pc.connectionState) {
case "connected":
console.info("connected");
stream_state = "open";
break;
case "disconnected":
console.info("closed");
stop(pc);
break;
default:
break;
async function start_stream(value: string): Promise<string> {
if( value === "start_webrtc_stream") {
stream_state = "waiting";
_webrtc_id = Math.random().toString(36).substring(2)
value = _webrtc_id;
console.log("set value to ", value);
pc = new RTCPeerConnection(rtc_configuration);
pc.addEventListener("connectionstatechange",
async (event) => {
switch(pc.connectionState) {
case "connected":
console.info("connected");
stream_state = "open";
break;
case "disconnected":
console.info("closed");
stop(pc);
break;
default:
break;
}
}
}
)
start(null, pc, audio_player, server.offer, _webrtc_id, "audio").then((connection) => {
pc = connection;
}).catch(() => {
console.info("catching")
dispatch("error", "Too many concurrent users. Come back later!");
});
}
)
let stream = null;
start(stream, pc, audio_player, server.offer, _webrtc_id, "audio").then((connection) => {
pc = connection;
}).catch(() => {
console.info("catching")
dispatch("error", "Too many concurrent users. Come back later!");
});
}
return value;
}

$: start_stream(value).then((val) => {
value = val;
});


@@ -93,23 +101,28 @@
on:play={() => dispatch("play")}
/>
{#if value !== "__webrtc_value__"}
<div class="audio-container">
<AudioWave audio_source={audio_player} {stream_state}/>
</div>
{/if}
{#if value === "__webrtc_value__"}
<Empty size="small">
<Music />
</Empty>
<Empty size="small">
<Music />
</Empty>
{/if}

<style>
:global(::part(wrapper)) {
margin-bottom: var(--size-2);
}
.audio-container {
display: flex;
height: 100%;
flex-direction: column;
justify-content: center;
align-items: center;
}

.standard-player {
width: 100%;
padding: var(--size-2);
}

.hidden {
@@ -40,6 +40,7 @@
)

$: if( value === "start_webrtc_stream") {
_webrtc_id = Math.random().toString(36).substring(2);
value = _webrtc_id;
pc = new RTCPeerConnection(rtc_configuration);
pc.addEventListener("connectionstatechange",
@@ -40,6 +40,7 @@
};

let canvas: HTMLCanvasElement;
export let track_constraints: MediaTrackConstraints | null = null;
export let rtc_configuration: Object;
export let stream_every = 1;
export let server: {
@@ -63,7 +64,7 @@
const target = event.target as HTMLInputElement;
const device_id = target.value;

await get_video_stream(include_audio, video_source, device_id).then(
await get_video_stream(include_audio, video_source, device_id, track_constraints).then(
async (local_stream) => {
stream = local_stream;
selected_device =
@@ -137,7 +138,7 @@
}
)
stream_state = "waiting"
webrtc_id = _webrtc_id;
webrtc_id = Math.random().toString(36).substring(2);
start(stream, pc, video_source, server.offer, webrtc_id).then((connection) => {
pc = connection;
}).catch(() => {
@@ -18,15 +18,16 @@ export function set_local_stream(
export async function get_video_stream(
include_audio: boolean,
video_source: HTMLVideoElement,
device_id?: string
device_id?: string,
track_constraints?: MediaTrackConstraints,
): Promise<MediaStream> {
const size = {
width: { ideal: 1920 },
height: { ideal: 1440 }
const fallback_constraints = track_constraints || {
width: { ideal: 500 },
height: { ideal: 500 }
};

const constraints = {
video: device_id ? { deviceId: { exact: device_id }, ...size } : size,
video: device_id ? { deviceId: { exact: device_id }, ...fallback_constraints } : fallback_constraints,
audio: include_audio
};
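This hunk threads `track_constraints` through to `getUserMedia` and swaps the hard-coded 1920x1440 ideal for a 500x500 fallback. A hedged sketch of setting the constraints from Python, assuming the `WebRTC` constructor forwards `track_constraints` to the frontend as this diff suggests:

```python
from gradio_webrtc import WebRTC

# A standard MediaTrackConstraints dict; these illustrative values
# override the 500x500 "ideal" fallback introduced above.
webrtc = WebRTC(
    label="Stream",
    mode="send-receive",
    modality="video",
    track_constraints={
        "width": {"ideal": 1280},
        "height": {"ideal": 720},
        "frameRate": {"max": 30},
    },
)
```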
@@ -35,7 +35,6 @@ export function createPeerConnection(pc, node) {
node.volume = 1.0; // Ensure volume is up
node.muted = false;
node.autoplay = true;

// Attempt to play (needed for some browsers)
node.play().catch(e => console.debug("Autoplay failed:", e));
}
@@ -49,8 +48,6 @@ export async function start(stream, pc: RTCPeerConnection, node, server_fn, webr
pc = createPeerConnection(pc, node);
if (stream) {
stream.getTracks().forEach((track) => {
track.applyConstraints({ frameRate: { max: 30 } });

console.debug("Track stream callback", track);
pc.addTrack(track, stream);
});
@@ -137,6 +134,7 @@ export function stop(pc: RTCPeerConnection) {
// close local audio / video
if (pc.getSenders()) {
pc.getSenders().forEach((sender) => {
console.log("sender", sender);
if (sender.track && sender.track.stop) sender.track.stop();
});
}
@@ -8,7 +8,7 @@ build-backend = "hatchling.build"

[project]
name = "gradio_webrtc"
version = "0.0.5"
version = "0.0.6"
description = "Stream images in realtime with webrtc"
readme = "README.md"
license = "apache-2.0"