diff --git a/demo/voice_text_editor/README.md b/demo/voice_text_editor/README.md
new file mode 100644
index 0000000..710bf91
--- /dev/null
+++ b/demo/voice_text_editor/README.md
@@ -0,0 +1,19 @@
+---
+title: Voice Text Editor
+emoji: 📝
+colorFrom: purple
+colorTo: red
+sdk: gradio
+sdk_version: 5.16.0
+app_file: app.py
+pinned: false
+license: mit
+short_description: Edit text documents with your voice!
+tags: [webrtc, websocket, gradio, secret|HF_TOKEN, secret|SAMBANOVA_API_KEY]
+---
+
+# Voice Text Editor
+
+Edit text documents with your voice!
+
+
diff --git a/demo/voice_text_editor/app.py b/demo/voice_text_editor/app.py
new file mode 100644
index 0000000..744b51e
--- /dev/null
+++ b/demo/voice_text_editor/app.py
@@ -0,0 +1,113 @@
+import os
+
+import gradio as gr
+from dotenv import load_dotenv
+from fastrtc import AdditionalOutputs, ReplyOnPause, Stream, get_stt_model
+from openai import OpenAI
+
+load_dotenv()
+
+sambanova_client = OpenAI(
+    api_key=os.getenv("SAMBANOVA_API_KEY"), base_url="https://api.sambanova.ai/v1"
+)
+stt_model = get_stt_model()
+
+
+SYSTEM_PROMPT = """You are an intelligent voice-activated text editor assistant. Your purpose is to help users create and modify text documents through voice commands.
+
+For each interaction:
+1. You will receive the current state of a text document and a voice input from the user.
+2. Determine if the input is:
+   a) A command to modify the document (e.g., "delete the last line", "capitalize that")
+   b) Content to be added to the document (e.g., "buy 12 eggs at the store")
+   c) A modification to existing content (e.g., "actually make that 24" to change "12" to "24")
+3. Return ONLY the new document state after the changes have been applied.
+
+Example:
+
+CURRENT DOCUMENT:
+
+
+Meeting notes:
+- Buy GPUs
+- Meet with Joe
+
+USER INPUT: Make that 100 GPUs
+
+NEW DOCUMENT STATE:
+
+Meeting notes:
+- Buy 100 GPUs
+- Meet with Joe
+
+Example 2:
+
+CURRENT DOCUMENT:
+
+Project Proposal
+
+USER INPUT: Make that a header
+
+NEW DOCUMENT STATE:
+
+# Project Proposal
+
+When handling commands:
+- Apply the requested changes precisely to the document
+- Support operations like adding, deleting, modifying, and moving text
+- Understand contextual references like "that", "the last line", "the second paragraph"
+
+When handling content additions:
+- Add the new text at the appropriate location (usually at the end or cursor position)
+- Format it appropriately based on the document context
+- If the user says to "add" or "insert", do not remove text that was already in the document.
+
+When handling content modifications:
+- Identify what part of the document the user is referring to
+- Apply the requested change while preserving the rest of the content
+- Be smart about contextual references (e.g., "make that 24" should know to replace a number)
+
+NEVER include any text in the new document state that is not part of the user's input.
+NEVER include the phrase "CURRENT DOCUMENT" in the new document state.
+NEVER reword the user's input unless you are explicitly asked to do so.
+""" + + +def edit(audio, current_document: str): + prompt = stt_model.stt(audio) + print(f"Prompt: {prompt}") + response = sambanova_client.chat.completions.create( + model="Meta-Llama-3.3-70B-Instruct", + messages=[ + {"role": "system", "content": SYSTEM_PROMPT}, + { + "role": "user", + "content": f"CURRENT DOCUMENT:\n\n{current_document}\n\nUSER INPUT: {prompt}", + }, + ], + max_tokens=200, + ) + doc = response.choices[0].message.content + yield AdditionalOutputs(doc) + + +doc = gr.Textbox(value="", label="Current Document") + + +stream = Stream( + ReplyOnPause(edit), + modality="audio", + mode="send", + additional_inputs=[doc], + additional_outputs=[doc], + additional_outputs_handler=lambda prev, current: current, + ui_args={"title": "Voice Text Editor with FastRTC 🗣️"}, +) + +if __name__ == "__main__": + if (mode := os.getenv("MODE")) == "UI": + stream.ui.launch(server_port=7860) + elif mode == "PHONE": + stream.fastphone(host="0.0.0.0", port=7860) + else: + stream.ui.launch(server_port=7860) diff --git a/docs/userguide/audio.md b/docs/userguide/audio.md index 0a6d25f..afa4040 100644 --- a/docs/userguide/audio.md +++ b/docs/userguide/audio.md @@ -151,97 +151,41 @@ The API is similar to `ReplyOnPause` with the addition of a `stop_words` paramet It is also possible to create asynchronous stream handlers. This is very convenient for accessing async APIs from major LLM developers, like Google and OpenAI. The main difference is that `receive`, `emit`, and `start_up` are now defined with `async def`. -Here is a complete example of using `AsyncStreamHandler` for using the Google Gemini real time API: +Here is aa simple example of using `AsyncStreamHandler`: === "Code" ``` py - from fastrtc import AsyncStreamHandler + from fastrtc import AsyncStreamHandler, wait_for_item import asyncio - import base64 - import os - import google.generativeai as genai - from google.generativeai.types import ( - LiveConnectConfig, SpeechConfig, - VoiceConfig, PrebuiltVoiceConfig - ) - class GeminiHandler(AsyncStreamHandler): + class AsyncEchoHandler(AsyncStreamHandler): """Handler for the Gemini API""" + + def __init__(self) -> None: + super().__init__() + self.queue = asyncio.Queue() - def __init__( - self, - expected_layout: Literal["mono"] = "mono", - output_sample_rate: int = 24000, - output_frame_size: int = 480, - ) -> None: - super().__init__( - expected_layout, - output_sample_rate, - output_frame_size, - input_sample_rate=16000, - ) - self.input_queue: asyncio.Queue = asyncio.Queue() - self.output_queue: asyncio.Queue = asyncio.Queue() - self.quit: asyncio.Event = asyncio.Event() + async def receive(self, frame: tuple[int, np.ndarray]) -> None: + self.queue.put(frame) - def copy(self) -> "GeminiHandler": - return GeminiHandler( - expected_layout="mono", - output_sample_rate=self.output_sample_rate, - output_frame_size=self.output_frame_size, - ) - - async def start_up(self): - await self.wait_for_args() - api_key, voice_name = self.latest_args[1:] - client = genai.Client( - api_key=api_key or os.getenv("GEMINI_API_KEY"), - http_options={"api_version": "v1alpha"}, - ) - config = LiveConnectConfig( - response_modalities=["AUDIO"], # type: ignore - speech_config=SpeechConfig( - voice_config=VoiceConfig( - prebuilt_voice_config=PrebuiltVoiceConfig( - voice_name=voice_name, - ) - ) - ), - ) - async with client.aio.live.connect( - model="gemini-2.0-flash-exp", config=config - ) as session: - async for audio in session.start_stream( - stream=self.stream(), mime_type="audio/pcm" - ): - if audio.data: - array = 
-                        self.output_queue.put_nowait(array)
-
-        async def stream(self) -> AsyncGenerator[bytes, None]:
-            while not self.quit.is_set():
-                try:
-                    audio = await asyncio.wait_for(self.input_queue.get(), 0.1)
-                    yield audio
-                except (asyncio.TimeoutError, TimeoutError):
-                    pass
-
-        async def receive(self, frame: tuple[int, np.ndarray]) -> None:
-            _, array = frame
-            array = array.squeeze()
-            audio_message = encode_audio(array)
-            self.input_queue.put_nowait(audio_message)
-
-        async def emit(self) -> tuple[int, np.ndarray]:
-            array = await self.output_queue.get()
-            return (self.output_sample_rate, array)
-
-        def shutdown(self) -> None:
-            self.quit.set()
-            self.args_set.clear()
+        async def emit(self) -> tuple[int, np.ndarray] | None:  # (2)
+            return await wait_for_item(self.queue)
+
+        def copy(self):
+            return AsyncEchoHandler()
+
+        async def shutdown(self):  # (3)
+            pass
+
+        async def start_up(self) -> None:  # (4)
+            pass
     ```
 
+!!! tip
+    See [Talk To Gemini](https://huggingface.co/spaces/fastrtc/talk-to-gemini) and [Talk To OpenAI](https://huggingface.co/spaces/fastrtc/talk-to-openai) for complete examples of `AsyncStreamHandler`s.
+
+
 ## Text To Speech
 
 You can use an on-device text to speech model if you have the `tts` extra installed.
diff --git a/docs/userguide/gradio.md b/docs/userguide/gradio.md
index 685053e..ba1682b 100644
--- a/docs/userguide/gradio.md
+++ b/docs/userguide/gradio.md
@@ -7,274 +7,38 @@ The automatic gradio UI is a great way to test your stream. However, you may wan
 
 To build a standalone Gradio application, you can use the `WebRTC` component and implement the `stream` event. Similarly to the `Stream` object, you must set the `mode` and `modality` arguments and pass in a `handler`.
 
-Below are some common examples of how to use the `WebRTC` component.
+In the `stream` event, you pass in your handler as well as the input and output components.
+
+``` py
+import gradio as gr
+import numpy as np
+from fastrtc import WebRTC, ReplyOnPause
+
+def response(audio: tuple[int, np.ndarray]):
+    """This function must yield audio frames"""
+    ...
+    yield audio
 
-=== "Reply On Pause"
-    ``` py
-    import gradio as gr
-    from gradio_webrtc import WebRTC, ReplyOnPause
-
-    def response(audio: tuple[int, np.ndarray]): # (1)
-        """This function must yield audio frames"""
-        ...
-        for numpy_array in generated_audio:
-            yield (sampling_rate, numpy_array, "mono") # (2)
-
-
-    with gr.Blocks() as demo:
-        gr.HTML(
-        """
-