```diff
@@ -176,7 +176,7 @@ if __name__ == "__main__":
 * An audio frame is represented as a tuple of (frame_rate, audio_samples) where `audio_samples` is a numpy array of shape (num_channels, num_samples).
 * You can also specify the audio layout ("mono" or "stereo") in the emit method by returning it as the third element of the tuple. If not specified, the default is "mono".
 * The `time_limit` parameter is the maximum time in seconds the conversation will run. If the time limit is reached, the audio stream will stop.
+* The `emit` method SHOULD NOT block. If a frame is not ready to be sent, the method should return None.
 
 ## Deployment
 
```
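As a companion to the frame-format bullets in the hunk above, here is a minimal sketch of an `emit` method that follows them. The `SineHandler` class and its sine-wave source are illustrative assumptions for this example, not part of the library's documented API:

```python
import numpy as np

class SineHandler:
    """Illustrative handler: emits 20 ms chunks of a 440 Hz sine wave."""

    def __init__(self, frame_rate: int = 24000):
        self.frame_rate = frame_rate
        self.chunk = int(frame_rate * 0.02)  # samples per 20 ms frame
        self.pos = 0

    def emit(self):
        # Build the next chunk; audio_samples must have shape (num_channels, num_samples).
        n = np.arange(self.pos, self.pos + self.chunk)
        self.pos += self.chunk
        wave = 0.3 * np.sin(2 * np.pi * 440 * n / self.frame_rate)
        audio_samples = (wave * 32767).astype(np.int16).reshape(1, -1)
        # Optional third element picks the layout ("mono" or "stereo"); default is "mono".
        return (self.frame_rate, audio_samples, "mono")
```

A stereo frame would instead carry an array of shape (2, num_samples) and return "stereo" as the third element.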
```diff
@@ -30,9 +30,8 @@ async def player_worker_decode(
 
     while not thread_quit.is_set():
         try:
-            async with asyncio.timeout(5):
-                # Get next frame
-                frame = await next_frame()
+            # Get next frame
+            frame = await asyncio.wait_for(next_frame(), timeout=5)
 
             if frame is None:
                 if quit_on_none:
@@ -68,7 +67,7 @@ async def player_worker_decode(
             await queue.put(processed_frame)
             logger.debug("Queue size utils.py: %s", queue.qsize())
 
-        except TimeoutError:
+        except (TimeoutError, asyncio.TimeoutError):
             logger.warning(
                 "Timeout in frame processing cycle after %s seconds - resetting", 5
             )
```
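These two hunks appear to be a compatibility fix: `asyncio.timeout` was only added in Python 3.11, while `asyncio.wait_for` also works on older interpreters. The widened `except` clause matters for the same reason, since before 3.11 `asyncio.TimeoutError` is a different class from the builtin `TimeoutError` (the two were only unified in 3.11). A standalone sketch of the same pattern, with `slow_source` as a hypothetical stand-in for `next_frame`:

```python
import asyncio

async def slow_source():
    # Stand-in for next_frame(): deliberately slower than the 5 s window.
    await asyncio.sleep(10)
    return b"frame"

async def main():
    try:
        # asyncio.wait_for exists on older interpreters, unlike asyncio.timeout (3.11+).
        frame = await asyncio.wait_for(slow_source(), timeout=5)
        print("got", frame)
    except (TimeoutError, asyncio.TimeoutError):
        # Before 3.11, asyncio.wait_for raises asyncio.TimeoutError, which is not
        # the builtin TimeoutError; catching both works across supported versions.
        print("timeout - resetting")

asyncio.run(main())
```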
```diff
@@ -214,7 +214,7 @@ if __name__ == "__main__":
 * An audio frame is represented as a tuple of (frame_rate, audio_samples) where `audio_samples` is a numpy array of shape (num_channels, num_samples).
 * You can also specify the audio layout ("mono" or "stereo") in the emit method by returning it as the third element of the tuple. If not specified, the default is "mono".
 * The `time_limit` parameter is the maximum time in seconds the conversation will run. If the time limit is reached, the audio stream will stop.
+* The `emit` method SHOULD NOT block. If a frame is not ready to be sent, the method should return None.
 
 ## Deployment
 
```
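The newly added bullet says `emit` SHOULD NOT block. One common way to satisfy that is to drain an internal queue and return None when nothing is ready; a minimal sketch under that assumption (the class and its `outgoing` queue are hypothetical, not the library's API):

```python
import queue

import numpy as np

class QueueBackedHandler:
    """Illustrative handler whose emit never blocks the stream loop."""

    def __init__(self, frame_rate: int = 24000):
        self.frame_rate = frame_rate
        self.outgoing: queue.Queue = queue.Queue()  # filled elsewhere, e.g. by a TTS thread

    def emit(self):
        try:
            # get_nowait returns immediately; emit must not stall the audio loop.
            audio_samples = self.outgoing.get_nowait()
        except queue.Empty:
            # Nothing ready yet: signal the stream to try again on the next tick.
            return None
        return (self.frame_rate, audio_samples)
```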
```diff
@@ -8,7 +8,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "gradio_webrtc"
-version = "0.0.6a3"
+version = "0.0.6"
 description = "Stream images in realtime with webrtc"
 readme = "README.md"
 license = "apache-2.0"
```