Mirror of https://github.com/HumanAIGC-Engineering/gradio-webrtc.git (synced 2026-02-05 18:09:23 +08:00)
Enforce modern typing (#258)
* Allow UP
* Upgrade typing
* test smolagents
* Change to contextlib

---------

Co-authored-by: Marcus Valtonen Örnhag <marcus.valtonen.ornhag@ericsson.com>
commit f70b27bd41
parent a07e9439b6
committed by GitHub
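"Allow UP" presumably refers to enabling Ruff's pyupgrade (UP) rule group, which flags the deprecated typing spellings removed in the hunks below. As a rough, hypothetical sketch of the idioms the commit standardizes on (not code from this repository, assuming a Python 3.9+ target):

# Sketch only: illustrates the enforced idioms, not code from the commit.
from collections.abc import Generator
from functools import cache


@cache  # replaces @lru_cache(maxsize=None)
def settings() -> dict[str, str]:  # builtin generics replace typing.Dict
    return {"model": "moonshine/base"}


def chunks(data: list[bytes]) -> Generator[bytes, None, None]:
    yield from data  # delegation replaces a manual for/yield loop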
@@ -1,8 +1,8 @@
 import asyncio
 import base64
 import os
+from collections.abc import AsyncGenerator
 from pathlib import Path
-from typing import AsyncGenerator

 import librosa
 import numpy as np
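Since Python 3.9, typing.AsyncGenerator has been deprecated in favour of collections.abc.AsyncGenerator, which is what the hunk above switches to; the abc class is subscriptable at runtime, so the annotation keeps the same shape. A minimal, hypothetical usage sketch (the function name and payload are made up):

import asyncio
from collections.abc import AsyncGenerator


async def tone_chunks(n: int) -> AsyncGenerator[bytes, None]:
    # Hypothetical async generator yielding n placeholder audio buffers.
    for _ in range(n):
        await asyncio.sleep(0)
        yield b"\x00" * 960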
@@ -1,5 +1,6 @@
-from functools import lru_cache
-from typing import Generator, Literal
+from collections.abc import Generator
+from functools import cache
+from typing import Literal

 import gradio as gr
 import numpy as np
@@ -17,7 +18,7 @@ from numpy.typing import NDArray
 load_dotenv()


-@lru_cache(maxsize=None)
+@cache
 def load_moonshine(
     model_name: Literal["moonshine/base", "moonshine/tiny"],
 ) -> MoonshineOnnxModel:
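functools.cache (Python 3.9+) is defined as lru_cache(maxsize=None), so the decorator swap above is behaviour-preserving: load_moonshine is still memoized without a size bound, keyed on model_name. A small equivalence sketch with a stand-in loader:

from functools import cache


@cache  # identical caching behaviour to @lru_cache(maxsize=None)
def load_model(name: str) -> str:
    # Stand-in for an expensive, load-once model initialisation.
    return f"loaded {name}"


load_model("moonshine/base")  # computed
load_model("moonshine/base")  # served from the cache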
@@ -3,7 +3,8 @@ import base64
 import json
 import os
 import pathlib
-from typing import AsyncGenerator, Literal
+from collections.abc import AsyncGenerator
+from typing import Literal

 import gradio as gr
 import numpy as np
@@ -1,5 +1,4 @@
 from pathlib import Path
-from typing import Dict, List

 from dotenv import load_dotenv
 from fastrtc import (
@@ -22,7 +21,7 @@ stt_model = get_stt_model()
 tts_model = get_tts_model()

 # Conversation state to maintain history
-conversation_state: List[Dict[str, str]] = []
+conversation_state: list[dict[str, str]] = []

 # System prompt for agent
 system_prompt = """You are a helpful assistant that can helps with finding places to
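PEP 585 (Python 3.9) made the builtin list and dict subscriptable, so the typing.List/typing.Dict aliases dropped in the two hunks above are no longer needed; the annotation still describes the same runtime type. A quick sketch:

from typing import Dict, List  # pre-3.9 spelling, now deprecated

old_state: List[Dict[str, str]] = []   # needs the typing import
new_state: list[dict[str, str]] = []   # PEP 585 builtin generics

# Both annotations describe plain lists of dicts at runtime.
assert type(old_state) is type(new_state) is list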
@@ -78,9 +77,7 @@ def process_response(audio)
     response_content = agent.run(input_text)

     # Convert response to audio using TTS model
-    for audio_chunk in tts_model.stream_tts_sync(response_content or ""):
-        # Yield the audio chunk
-        yield audio_chunk
+    yield from tts_model.stream_tts_sync(response_content or "")


 stream = Stream(
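yield from delegates to the inner generator and yields exactly the chunks that stream_tts_sync produces, so the replacement above is behaviour-identical to the explicit pass-through loop while being shorter. A self-contained sketch of the equivalence, using a stand-in for stream_tts_sync:

from collections.abc import Iterator


def stream_tts_sync(text: str) -> Iterator[bytes]:
    # Stand-in for the real TTS streamer: yields fake audio chunks per word.
    for word in text.split():
        yield word.encode()


def respond_loop(text: str) -> Iterator[bytes]:
    for chunk in stream_tts_sync(text):  # old style: explicit pass-through loop
        yield chunk


def respond_delegate(text: str) -> Iterator[bytes]:
    yield from stream_tts_sync(text)  # new style: delegate to the generator


assert list(respond_loop("hi there")) == list(respond_delegate("hi there"))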